[Binary tar archive; member data is gzip-compressed and not recoverable as text. Recoverable archive listing:]
var/home/core/zuul-output/
var/home/core/zuul-output/logs/
var/home/core/zuul-output/logs/kubelet.log.gz
I<("q58X8IASGjsK,KE?0.zEM3M,G'bH, +QEP"m:ɲ'ŗeǞx9 l@A,WNmhs~^:̪fPrZ0^aV}zYr}JvI)]s}]l'o!T(m?;mi6o6/nZDKY8<]LzxZvsvğ_;Ne'~Y\}|O+f-CL_ q j;&VILۼ75;|p^>óOcH#6m=VfӰ3Cd9JrVTy]&<Rh$FX6攜]$ rgoLv 3p䲻ZtnsNOI}z&里GifbZ.%‡ ϠHҧnq"rۥkuq$:#ϬR&ʼ3{`FjRNor D9 HdDÝthvAmO;EMIĴi 6I?ξN/8>(0<g1GBs֤30tr~Zqwc}kڥ/847I,:KI\ZY;:;rSr*Dt`Wmx$lB y%Keb׉ )xLR5sHOzBӆgit=q!h 89e7gb4L/IZ(Pׯ&֩o7ņ>{_1qhdhWP"EC+Q l KCxˇ.tiT9ՀąsA\lq|(K $Ӟ1dTb`|(Fd'L<2憸̺e̎GfI{H+ }ٙ]TA'1v6vC"uʌ(CRA{^u7O)jRNƖͪW^яu—2 8EQE&] _aRaZVH^F5:އ~Q?ӢR^Yz-#E8(\>^m Fÿ-@ee  oޠuw+AC:|-"jx!ez!6nX?uG6-7*al[>ivU֍!%GFs9pXDLu㺁4l0Y3@W)˵L1K#M]?(Um5bo4en_oݕb?fcƠ}1]N˗׏V:%R7Fw RըcZCf&wf1 qI& Ξ[3έ&,x44XM[c[ *gJE2*0˩"[P1 G {iNНR/쾎gߝ:݀q፤ERSxNq#1Qښf"[Ke>j6ŷ/Ҡ^xꈛZ ?Dp :X$͎jB,1U(4OfCo;ErX`F4aqZXd"B Y^@k ap) 8 1yt}}WȢ0 *ghTZ;ecP )@ygKᑴ[ICj5&nhfp%-vi=my&wT˝7SOYw8I3M%rOMDdInvKќdsk,Qp2Z5OjȾoͪ ୗ0)Gjj%#aYms-:nyP WcB)7j5(bD$j6p2.gBȯHHbLN(T0H[eTJz!JY9 X8{ve6?PƦ(5vEkY6iY -KX,ݤ&>B&b0c`_c1X m BKDKs ]Z?gSXR$ 3ŕJP\$J-j=ӆ[di#YN(B)6 ,t3#E%;Rh~kR.hJo`A8߸^P@p}Q8qqW8b>^g6\6U;Ne~>\ ]n7,<`/K&0:ԥ>}ᾅi.!f ؒ /U4EP`viEb hBMg7L0E*]awg?fJ/-P6Of^rwvc4NgzՍqM'dkz t:Mf?<Y"vA;_%SKN7D0pNR 6;5qiz6W Zp!RW-,I2۴FvEEJK ,gy`LAEz)EF)YGē8Q/UF/avJ4  KowgJ,sK]WԝY X  n@?Q@O*lc%͊ e~]vN |6 gBVyk ҳVi[&G)nxf/]qyj1ќY g{6 PT0GCC}_$s,R=z\7F+;Y2LJlxĪ&0!0ח䚪3=ϗ7ay3HzAC,Ij/l [UT=JKgqNOi8D mO< LwǶn`ATC3|\T|‰!d-}|f [aK1OgY)B1zƞ4aƅٛ<`I]0Iy< Ja5)hR &ۢ8L2@{66}o_fBD7} Uz 8 A?&*8< |y6L)-!c  QX &Ş{]ff4.ʲ>HgSu$E\Ir],Jj7r${TD,F#Œk☌Gv: Go<6^״E\q@j̏S'dDפOZ܋u'ߞXN z4:+űh*NPr ( rfUXaW \u4 U:JPbѲȮ@^7>%gGîB JЪ׮JZ%+&>xkh-чήT%+育A8ϖ/*RΫ~x* &vȩs H.: Ũ`^7եZ-Etr!Be/MsV6_o&\.Ҝ]cJe1Tq,;xU6[8*L>FY'O!_we0txľ~\%>dBbKO q5#SrJ͙"ֿxiKc/'ɨG3܁˽9f9'fѡ7+ iO=4)3v iq3 5+>v\ReaB]&͌F Y$#"qMμ-$O.3zI>nw2Е4UV'B()D%%"$%&jP'*ũ\70uKƸ+Xw17ؖ{kP%؂ C X6mA'- 5R 'C ;{~D Zd 2HJQJ\F Gâ(B`( J6fḥUրЩS6ʌ ;6`R3U"g;Y_I\8;j뛎qlZ"6'G 9´FwZ8Ԏ%dG~v$?:;rm\QsX6[ ŇYU=3|[{^5Av3a$ȉ֦pSd;ЦQ'`R kxvWpH`hQ_ڶzv;00^jC'n=N]W.-_.0 Tn;Ìi{|Mlt`gw 6qu&{\]Xhd8mXAL Hi[Pċ+3:kt^9p'ԉ<Ͱ䠖q3akg;ezFw)Q.]Br w&&^:huREï}kx))NS[uRoqNwX- *.xkz5Nnڕq'n~k' %ř scqkNHV`ݓ\3@ook"fXb[,FlAP|3V[ 2\/[5K^_^&OUXp|yvO5XA^_](%8{C~~ddǘ*&)P z4?u86!?'I5~ޘч)*Q>΋G7ڝ\=xx{M2`hnEڮө.vyP.Hlz $.*؜Q !d .p| nb0߂d\IIyx8a?]0~DE~4Q`a,j*ŃCW(=|W3s}?vbꕀ_~cfM1o^9 1(\ݻyLe K*TR2iG~Lzn8oWz?Ik,5~$0"|9W]]n'@N5ي oZ:VXt IWL; 2-<_yKuz8v~1IXJLkYf=Me2wcf$,z*O<}sBSos<v DDC_bEʜmV @:)@drv41c*bpWGCJ~2‭iU"bK(F[ PhR0#4'(xAc5rvrL{܌Pm~:1Oo} znrA?{q#Our3h}`P W_&~xuh)ZndВZK* k'f*)!" i!>"H[t>3ߞZc2"JFr9E=@FCsݴ$U) 5.ϋ_ Z oSI, 9ynQDO;Mu;|^|(]~>#{(reѰ__jX -0e櫾->v9@H`H OtvW`<\aՍiX.!f76 +'^.`nC7g\zlF{&P̊0)rT11OYӠYɟ̓Yu:O~ͮ`픾=Vu;Z"gč.> 9M?|@>~"tŠ/o//X+0 00Tg:RPy]TUN5%w }]6?@rD-6"H\:$UXM.J=wl+9qcq>{޵cٿ"+曗bFczngXN˪,).IWR(-}:RkVˬwzWw-j۷~8͚Q޹Šx 1@Va*d;~\)oS/ UUR G֧a. 
Q_r~&'(k˂.Kʸ_o!DA\Xeҳ-+q{'neF g‡s_[s7lɲE'!'nfĦgϗSNcWNZ*$SNߔN( ́>e-hE6OSN詻xhVSl7A{Fٍá%8/(7{)0΂NkjW KS7}'iiHTN:S%&9rϹQl}}6bKe qiJ5@D䃕Z) %!;D8SRaUx d~gAV?`>R L\M U ]^"sU bK}׫o?G7Qɲ?Щq/Kŷ'!X dږ;ѮFk}盼5!f^-]/M5s^`t0ս zޑ"JTa/*jauFΧ Y p5D{+ɩyȶ 3OCv'Y=Ip:`U^[IQ>9 &8u3`H3H`]ۧ/Kc1*p@M ]9C%щ"Ѥ69k5)*KXL-wB s[أ0}>p@pv&pE$\F 1&DRYhp3ƚLek9EAaOQ&-Gph%T9FI}ZL5i6=CuTɹ'O3&[HuNG]n#XT' T#ϥoQT»?:zg4R\ą[ 2$8U:qH' >Ȩ@ɑB>>N됌c) cPTqK^ل UqbXXL3vBcYa\lwdsj.ۀS?v~Ӡ1FlGBbR'JMҜf97T hd.gx"6C6l`;8˰J8%f&脶@`PxLSAbڱ+j¨zZTI)lEa-NW@ 'É8#XE0IwDfHZўGX"E>N IhTGmma<,&fڠv7c/`9smJM87/Z^ioF~m}?`li&{Io ~¶V绋BnvC¡);ގ'X 'ͯa_'n@?NZ+NNJxu&g&G].zz+8s ݿj~a.~h=~Pz 3FDFiFG m.7mUW3aaf൧w\jlh~i> _= f/jvulw&iosIݤDWRwd/ P\T$' F%\#\_Tm=)EKxQOHB-wJ,Jp*<2&w,+i)7K9]H< DH߼3&(/|DC3Df>+lzn9WڮS>O.2dR[%-@ؒRr7Kc{A%kG]x_KQT P Ad")!Ȩ9e=z ZȔ .7zUzW?Y<&pm\ĸBz̮,IeqbF鈈ƸDcT2 ;Y-)U9&%w%aO{2 K7;%kC4Z<9|-Gn#I"C\{vc6$`X<2TsW kr|`jSL3o*"Ϳu >WAmC-?3ےZ\l{u$9h}p1o|hLe!x)uSi]&N|ۓSN߃:Y)#TJ|%8#('bZc`]PA9j‰\8Gl6~3vu ou'!׊6Xl&Bν |{[Wغ~ܺjZs4[?uز–$wnx[?-z^h^&fW(yzmt~Kӳ|Ch~ϕYzMYعjtSCLqE |KqGE.Z6#DBrav)Q 8NCD:IT>%3k>h $`,%M`IQKZZ-XJ8scc IJMTBHψ9JXa8sDj OJ;dBNrpz%us 5)(&p`聨/X#L\ D}'ERJO+^ } ꮯʜ\,.#WYZ.NBp~F}W{wNjAznvݙ~{!p+DB>aZCS; n{˳a;mp=dcy0.H6wi$Oi1aPm~Lo_LZj7ڻ #s"+ᨩ@lR$c&,R=h W-Y_곔BKʕ[峟V&_>x1I#uW3[)Jߢ?ùέT"؄<;igXR*ς>!QrU4՚%5*CEl>BLOj0yRg7(l2קƷ?c7_|^j0<Ռ&Ɍ1\` $xO2W:erh;NI>Mؑ6y&`9%] |9(KC>kϴC8p$ X5AK+Q8Z+au" :j^cհCY5V ;\*\]91$DncZlIJSB $ 8ୢ)1$C|^MQ3ﶈY2:+%#z~ϑ)U5DhPbMX "]epfd.@!}Ɇhϑ!9s%d"<JfekOrF qo,N*{M?B|nN:;}n}Ufrfއ闅{NϘb)qRs,SzL"Y(/F57Eg7T&"pJ1ȅh"u P-՚1JBOԤDxvzPBhSM")&#^!r=0p` h+S<bVA9f ':8c,qީESG.GewJ?M-Rl9anD9i{۞-DS)X\ \qf9yGUguKKM&P/z)BOF9c65U~#m&+в Mr&Z \lr-|ڷx w* ;ڟz^vk{^Uj3S bo4:=|{~Fϊ۾9z[uunD .ސoJq,yb@1rQh2?>87.$nd?Gof[ .~foDyqeiɟOF:hܐE3.>I\۪.//~t4syֲ w0Mr y9=M`CBM.QըJ nq}N%+녶'ȍ?Y) WtaXb Q`c=,j*%+W_|H5aAo3SK[m|ު mU\Eq9|ykS^R:J$.7p{괣C^V_|5dqI/Fx@roiCزEШs~ 8H MYlŨXo[fh>$7X ޓTį~K3+b4.:rrٻULۤΫ4QSv-{1>xi4(w;*.?pg3zs"D@ᜟ i:$VW0m_3ݳ\qL\7Kδf;]Ⱦ$Lܦ#HaX$W#7 y]/1{ hԠ԰.T[h/n=AmjL:Y\m,;ZW쿩]OWSP|Oɫ"9JEǂ) EQH # QE@EGc@V2GOONޯN̑TI;#/>>)_/hǸFRERS6IAFk DưFik9\'FXp4Sg<6مvs^yHP GW*fP.)և<+o yozn^Vj7njm,Jܧ%@5VQnHMOƓc{g#O8gRtؖfn(r!+;כa=%>OP^ 3}^0AeSu(w>n,b糤P:oѱÄ69ɮ>7ڥӰ.ke-e` D BP5;LjnD&Iu[];g.G.AanΟT̮*4[3Ж dDE.x;$O"JΪ!eR$:3ŕJ.%2 ϴV0഑,H'Ca=-}Җ@gG&sG;RhkR7@ޞpw2_+D{&Y>p>G)syQ&3Π&KWxhb9\Uȳ4Loզe(Ӭ |Q9t*-^-[(@rPh+~俰Z`O}V{hYL^&AftqW?<<ᶎi.!Z /ǣl)y&j.u0aQ,׸1F; }$#J0+2F1 1LY^B Ɓ/0̳{Hk:K.9df hbJD^oMϷZ`b"Z~`̍}|O*1^h'G[QGioEbSDi`=@KF7D0°zcԻzf: v7!w C]6JjasD-6r jatHp{sVz\r.Qʬ*d$D"?y,8]<>oZ~φ$wjĻyN0Yh; …$|ɝ^f>{.cU!tA_1l7s4S⑴l)uVۜ|񼚝JҲ"]2y~dpμEce#: >(pܤsZL4gVGÙz萅CT&h,b'p{t 6e^=wZБnf"42jA'!"xbq.4C3r24wbh>͝=s64"g}Cbns-;[WNOKa}/Au~e$n3$ԥ1欃/jWTYLGף57WK$5?WkЇmmvVdBŴ'QhYNT}:ˇZs$?g1Y.dWo6½VhSt7`{8SbXF:pѯ~p*o?II-r@iNT{T;֛/(6*~)VmCZp =hcK~ؗ'm<JDe(Gf7% Ƙcl2N2Ke$"_jY"M᧘?G#CAhb+t@Hm'g)Oc.*5Xgm"qMvxt+Σ>R>iMJljp5K%X,Qd eJ+gI)>AX <9?gShO5Q!r) BpAl KE,hm#AC4 SK 7gvC:&. 
މ&0|pUp>:Ō{]v;ABV񄎞a~lWaVtxGڒ-#X&Y#x\i>%\:٠N6}mIjF733I'DH$C%f^u ;AtRvQjTJcOpP4iejG 3`#*tzGMFQuzGB4E ͺŲc!U'.HW5nD9UXus_Ι&$oS$J7eɩVRU*$u#D<'~ΉN*N &._$jA&nt nNG b&@dm)ϊ/^V7~SlǓA][e#|CK*5a/^D-49wLJ7#3+3QG EcBT#ё[qĠ^ Rf%V"+@C#RlBrx/eKY>r#QI98xlǽٻ6$+_=eeF%@LOc_ #)F"QDTR"j ,"NqN;9|IsU`p|H' _?5:P~HM|<_M _~] W\+v7֢U-bvhH)Otg].ЇbFĆXLS=TXWD3"'gFd/[ψhb7IE3ՒEB*R |Xhd ;4}<£Lpt<ԲS Y K" +!xgd b"6(+aµxQ\̺ZReLjd*M)X4ʜ큚MfƟjɵc(O,JZ{>yToϿ:MQy89WYF"2|$4 % R']%1E RA =9R$mSqXO&*EyS^2A5263g;2*Ͱg싅“;ɬW*3>ߤ;MLb7~@#6$lLHL.bƩt"$Z"FcHfq PŞO 0@Flam'Pl=Z/÷2g;b(HcAfޱ/jƨmzZ")' 9GX+7OBd :EK)mjgJ*:b5)h r/׎*‡T4[Ȣ:[a3sLԏdrrcAf/"RcD{D%j!|AiZ`}`q'fY0pxb!cbf*Y+[\+(gB T6Hbd% ʢREuۏflG3?8]n:f^/.Ƹz\qqs)Z3D|;mD L2ڤ%ApXw쉇ωObEܔg?Sfa2ͳףgYTԜ(wVdQ*vizqբtTM"Pt,Sw]YP>cyDH4*yx++?|q-F]wŞIl~Nf޻Kgo7K~n̷hިbu-뿯7N?onn'r_pQ[tay3e g8W4w2`7F{S42_Lu3xH"Xd3o>u;B`dZUWsNUuЍߪZieߪ;lUܖ5tu>Ი=ԏR~Ó"M.iT 91lx6޻\_|7_pkB  _FF%k1 pE 󙘝SqK;m: &'3VsќX[]G:V+p5u*\UtA5ם4`kQUHj=vU%}*p:!z \+YY"tp'W53ϋ.wudzR2l߆oe9;}T!&@ i=kl'g5#\8`(u<|in+7; cTuw<EoA눨Nd1͌^^0耲~1>dYaOGG'2's_ʣ4ۻ3= .o74{. }X>9ys:''g/tLd_xwՂKeƤTaO)%tv(Hg˔RR+E\=r󣯬} f0/$ rFT Bі`BeB9a=8lu@:TbTzZ.>E- ELўJnn!=88yؑh}Bk,ly2k,\R5XVk,Fjzq6]~Ob !غ":D ΰK[|I/I{S;G)~|fgT_8NNl}u3 CA CmԢX2E/|-:Nt6E5J\OӮjGTh;v@{~@@Zg> (,Qp:1 *'ou26'%L^jڸpcY-PREk>m%9+\ [Nw?uFn4m: uݥDhY?5bE">g_Go>=tIoټ3<|dG.˕~M3iμrfη|wCQEV;o9xm%.?]\^'߶ֺ\f=)4tB5^Rr3isttRiYoRY>+H :$0>s %"%(193be |t%$b .ϴ|Nhj*+J \)AD#X +@ٮ;}L*]W :jK{O@/٭@_b>Ԓm^ x&ol$f7N:])q[tt[Iy@VtħY'JFcVCRGu5xN3oj]懽C;B^YgksIF:׈؈MxN>T||iE=4r@g壳Dy׃k_gR&S8:j),%T32al1y`un3A/YB][iRP#&,[[vme@&M,d=z6}{{ƽluۻB62HdBۚž2XDVZS>C$HA<P*('Gm*.D(osBKW0HFflGv\6}XxRXx'YSeқc~4.*1t~8O^8bJbʖJHȄb iJIj@W,B/]b4dz4DxŞO 0@Flam'Pl=Z/2g;b(HcAfޱ/jƨmz&Z")' 9GX+7OBd :EK)mTJ*:b5)h Eđ1_!x̜x8ֶǾHzDqǗtB"k5:ũT dcImk9BXgPbd3!H*lL1e["#b3s#u`\.7\g3/]c\t=.9A h">6"SvA mޒHARdq x,x;Ռ@X' vnJ3) ُ6xzl".MD$B"y"9DH:DH:l1Ya-ZBgJ Y@RҘZ:2DhK'DDH`8ŊVe8Q/Li*0E3dO(/5f1rŒrLYvx>[ V~TU|…GTlҪ,Ȓ$Sda48FAjSD(^I[nf a]OuL`u/ ktprP~݃]?Pft-ף'd4]2^>vuV:X{;WCݚVB"u*{`tuky[*)rX DH:%g MvL[ԽuMxcd|d`m֐s-p^x/1=+?Ӊ2]7ySlrIip&RA"Ɂ"Ő];AQw]Ҟqq5Y$!O'k0ޮӓ~뛖ok!_xnf~nfnmZznf%quktЭ9}e3˫:M;?dϓN G}IdWvNytD'a3 O5 O= O6 XdkPI:*բ6aq3h1aLXJZ:(y]Gct{6^x7eϼܡdž^SOnU}(4rɵ^mRjpJ  < ,.n닺/>7A 8+iСs cTtO }cmPdJsB-QYrPL"t1 mbCs(ӼO^NίF|}ٳuv7S?]S&W{:Tw)8!DI,1U2BO>Ȳ%P NJ㜗>򔝎,gThآJ+Q43g{˳^@:Xl|W6m˻_S=+zeLRIm#+(zA}ʵ='ٸ6ɦ{WSfD\׃,PJTVBAc==ݜ>r.?۰;[&iscϥ'2hG>S;bֻpKX`!_KC?{-N|&'𛾚M(79~}JcнL4 ;r⭵3$a>"#FyE&6HeDL qGyvs(1d#ÁV46[w;X-O #xFW**EH~:FDS<#FX9NGCGxfb8*GQ (h(J#6\f*xn! 
)^9ϼC[ ; /G pgZ"n݇Eu8:O[;nn^੝~`Ń,`9B 0(UXDJayV)f,qܠzU&zZUQ48x V53(F#B0K-У8j ߟn:=UЊ8yd>k%` ,3:ijZ"x'g"0N&GLꗎз:hmG!ۈ(ڲ i@dGx %nup#E8鴗sI('DQA؀%pp)B:JiX#1{ ̓œYKC0L8q Vwۧ߰ ,~OOEM.dz>WAA p&W)63Xia=0U"Ŵx~X`N_CQ]W02RLtϪ`t/ѭRkPۼB>F<յރnzA4%=|Dt].V] x[aL§޹b9'&,ӱqoA;M\y2Z;_*%2n qouv#6- ~5)2C[ͷ)D.E>(^mIT e,R+]]Kپ^'U?5 FAǯl|SQK)yל'{%C~6]m?ɑѸ \%܊WA~PgS`γh v49G(FE[pgӋhOF ӛ ) WH:0F >kb#XXK~v ʵ+ OB0`3S񶊭.mbފKe̕]dgg^epcJKUΔĹsOv4p ]1 7IN72,[޽_;)!P/6gK} XHuB6*\qV [6*JfTlVxEU͍Y?=I%A 2WV LJ;X]R/R,n5^jӐtSy&zxPˌ2՚|e$ÅЗarQ^hb6*2_i[:M'uR O|PrU|M|>NHaQ{$WOnlP4whVLosaMAu=TWl ˵ Pvݢ3h޺Ǥ,\lՆ5ewn+DDCW3y.RyS d@FX"Qƀ"KeN,TI; "(O1.]SJp*n)4X@a {a&ţD%t<||(ų;gLx=rt ^W= -*hZtUmZ<80Dʵ Nir`), 1>Dfn#ap) "AhS>~,[}ſm꫹yW\OEKawstOhU}tnyg<I<w/hi!3w@ ^R1jsw iI龈|_wc^~"`Qk9Hbt:+d@"=beS/i3{]5QʮU)y<J[yNKNoa`]X:~-=i{L,c0D&p TQ͍T1sFV;|||&y|zi^\UF@{4aDŽ[zdAgi0Ϫ5<10c~ n4)RVkQƂ^~DMM^H)C7_b~9\ݏa")|G8?^RKmG%Ya 32(PVM@9 %}ѯ5p %d i>$]nк:noo[-Xpo /e PjguWUê~Ե5/h&UN%xf6Jd'Y&l" ^]F:eh>Y>^d_1kX&&~gWQ ͩE:gQPnrOvh{zr"Ecd}K= BD#$r9潴H<1qB0מ@cpw mz z[S2LZfΝb"iJ {M rPp.YFa{J{D.A:R")9IE$s2 EwZ{p0 AP$@2A:|*Up$"T(@# c 6ptH8( L`-%c,)1KP𿠬[Oyl}HH)g Fi%R%ϴV0`,H'Ca= RǂKb8(LdyTkg#r V B) 4Za5E)G O`JoN G8:5P+D{@j?8sin\be*H>饗wUtDqj<U*|^t"|^o]$zZ`-23.UY$IBI$7RÌ..W\z 7}ba#[1xT:",.}`;j=^Mrѣ#J0ʊ#$0 5"5Zt> lQa hJ-z@Mop-d~`̍>>-O"i~$wo^BDE~}0(bY$ke{i0TozRPy]Lz,r0y}Zh!RF-,In&j+ȁ;YH t1M*PrbÓc3XT)U*x6&NQvz zD#u0dK(i/EWli/ʕz>^10l Mbҁea.Ilv*opjd_eש]2'}:`Y)VXg5AFжwnx ܤsZL4gVGÙz N */#LOYTNpI#p{p 6y)=tٱb(L!DE$ZDqKȨ&XeK=1brs0WN31+YYwͨqw'v cӣ>ThT<ЧD0O&)+S }JJuOJ} }zO RW@ɨD.OE]%j%?tu쨮bX+$JsdU"WȧJTjyTWQ]Iܷ v5rqY94|/#0kx>#'{l2UM=&X~&Y&Z3Ôi5g˧wnNx)U@0LՓQӉ\jվmD%G5 4Wn> rڳ)}^b&U9.K:ꠃQZS9/F-T;9^>È~!o:(:KſtTd)2h2I#X`gsbX$ea^ybklj#;;bueG7GQL4ⶥ#xNWźL>^\__l}suN3~7D":T;9JW" g=YT{}<}*y..9&FjuB^.5tRXE8RX5O R5R I'NFX`dઊkܩU֙K ٰHSrTrNXZ%Jhpz)-NꄀtNt2q',-1tRJ5•YdN}m5,էOWi>$VY o9zs5~;g4}!TYgg׾9ϣϿ濱;v/UCw=H3CޝJ{UWOJeOKm1כi6콳o{$wgWC7gLx3_ \L3%Z:|e/i3 y?-^jћT[zR@)M78gz|yrSBR0%헻؃O7oQU4!3;z?=qK?׋p=Zj}B{lxn؇P$=Qcg4Ғ:\Mt ^!\G U~'T%WUJ ^!\)Dd VGW,++~pRpUt2pU=r%:\U)V 51Ϙ3P4歬f]歵x4 , LWq<fiﳫRfFʧ/:/ѫAeyy!!f73K- 9.%~ 1@HH^gY:-iy*Ĩ:FкofٺofٚofS}ul7[}ul7[}ul7[ffٺoIuDO%,sk?;` Oc6CWF<9 `s;\x:b%6{6 ;\ޝZrm{>7Sҩ}5 Z(3Gkk w2kD4Z9TIkSXTvϸ y\}^C>NZ}uu=Jnj}[G?{6?>nGk8M}caƭ"v'!nZ7WE6hߓ|N7,ŗ.&ɕ!F]~5b?K;FyTz!Te {O);|zGzJO=Qcv8ABc^#.K/|ko_{x,pUT% $=sb)=,)di~Fa`s]ZM>,!\uh^DurE>\Uè/89 AҖ`JA%H9| 9iTѤࢧԇFWkOvU͌.4k~7(& q(fΧ]]̭Ic܃%cC,ICdQy^n"@Q7K0 a')18CF'o/Y#5f.ɛz-31L5v H8 M>@Ce @JR!8%E$)w:Л8;"!{@A{NDe2Wg[-+P:KatpIDa,FwF&IUɳufA Njb֥1(Z؊(c k=3Jt %=/پP/S7fZ].^]JRozg2+#Ji>!i00XGo?.wɛiݍtx{[FBWS;$([\Vf٬ 7nk k>o`E @[J$dG_d4dKbD 1D( =9R$mSqNT zS=/ R}#colGJoX؛f싅aIaDun7Vf|ZٛHyxs9Ygtbr#6[ILQ I1!1E)I  4lfC2CQ^[*ȷ{>P! `0,d v\d`\D3b&vWdb{ӎ}Qڦvc3촬r):=/s+Ƌx'!2 :EKW<,X'A33VtT2֤, k\S[hl{g;N/y4}8OUR:hM^a'cS`/ni:toʏrg%k bS/vE(ѡ.sλ.,Ϫmx0͐PMۖl1艬VYRΔ@ dȸՄr-X5`߸esY>rHm/[zV=_N]2zY̩x>_~/x(r ZGj'#e@€`r-P6/s_i(SBH2p th^H {6H9Eţz^Ľ}߱y]9s]K utіj?/7䤗SA_Sf2f4Wۑ\mĪVsNϯCf~U18Z=LjdWDRF!. 
eQ![-T 4S 3+`nqju/H?>(ٲq+mHʱ~JV"_4iY~K)ˌ tuBw!AUU|Z*S/z3풊jP1rBhKƳśu Elz|;98[Ҁd hԵBT'ON/^nu<%ntZ[Iҝ-3ccGaG$J{/-9}0ڌ)BHt(—6f]^yHw:,=Ϋj.,kԮѨr|b.Ͷ1#1-\C8>Ne)zh9DgStQ+)kt ' (t;vӽd9@Щ-^uV% N'&QsV'cs|]21PϕZ CI^z&NǬqnz >tBnmr?iW"y:Ge1_,_=]yxɖA/t@+U2vvf>ϡa+7jxw7.:Sys>O]si/Vn|[T:< :+rbvJk6Z`A sBRu* k 9Sk$m MFWlQk7|NQmy=2iO'6":`zX$z~vN-z&/լ:"ApByQ+%=S</#Qy?@.gpRYm# _~.iz7ڦ&0:jdQGl3l!eqh]~393g2"uoi PEi^d$u=oqm=ǂ_:۲STitg'R=˛~mt?Nw6Mrki$ct'i:g2Q(zp+8U,- x.&ixNLD-ɨt*uT;p$3)3`;Ʒ~k_( v<oA;Nf)56)q)w.*DZBYcqoچF[C?@/'\}`T/1HU)T`7fL #+E}"ӻdO`jW7:TէE,Y!n)^0quoUSWnav57iG>/x˷6Ė"%wzkUb9ЎO/ N\mdZ^y]}.==d{lz嶲[ck@hJIgU>nчՇdy'%Ek˕12R Bl^bf3.C_J{)~ <{_̮Ro_73nkK5ʎkr@ɕe4swi8Y3CO]-O澐\>_2+x_j*K\3tRLܴz00_JɈQ=#\K?rCoZB?WI$?~b}g[cl2{n_(ED'$?>K+ ̫,70F&ilefIzLwN7Ey[ <-^~>)~)HfWSq(8l}}vL>"˔#QR !u%qjk_߿Կ4cjg<O峼͒s5#kA zFr㎝/}wp/x5t[WG/86ɩw;"s150E:2R-;rgн˝az"cThJ3&1GȼP08fڱ, ,:a J)S 95Ĥ425Ϊߟ_}0>Mr+^"H_ ̿/9ѸFs.c7ˆs8C>,6}p φMxyQ~^U7qd6G? ׬֋ٲnU!y>%*{{uǣzc\/."i"͕>r醴#Ƀe Vk#+T̩:*p\іMOG>:; rϸ$_\uS~pMuE;oS K5$\Z{\J՘pu8b#VJ+kX(BWR7,W+& \'w\\uj;PpuOPP W(WPpj;@aJ{4/ѷ3\rfE#pmT5, 39|Lxw^Y(\QOs S_#cbQ+\FJ%\\}zK p`+Āj{Fz*(&/0^M7{LfyZ~<!6)v,jfI@_Q9sVKlBIr:+Iܤ*w.{CeC*í}@C.˩T槸E&OC^^dU@ ޳ 5g39|)mW/It:ޱ_x`'_O9i,bE 3"ϘA-]vh=Fy+ #;*XdƩLm\LT;s0)C)( >(PBT{ﻣJIB )[ @2\Z{\Fр1wq1\܀pjENPe#gbśj}[dY9 P _~tzU.F~~FB9{(l鯣jus6.>uL,ѱH)֑X(1R9'Y,rp9js ,oBW>6|66]|~̊泿SEׄ]T6sG*rt|tG`nT3j9O}-;*ؠ6zUڏEݣb,&N(6VfoO+JǼ߅.ɂ)NopPo )oGˊq6խ^7KX2v5v8 V |y}Dc\LU,QXSlz6"V0K`NrM_\uS+JӳX؀6=H0^`prZC;Ppu= W^w+M(T UZ:q%$$g+ P J{ UW+9W\N%j,.7 F%!ap`kʾcUr>`1-2| PR(DZ̧E.jts^gX(K5kI4 X,8c3cX"e"Ѵ18!@h=imbT!e9: nkP?TTߍ?Pi4IHBŗ\KB5=sp\J&\ #Tpm8l'tS T AʂkeCŗ\[T؛=<W׃+޲0Z7㪓`wg\NjӇn*Ex\Wmzʉ: \`ͩɵ*\Zmq*5pub\,6$XR \ P=Ppu\KV+i(BW+!!JJTRn+Pɨpu҆)ͩUD͘T#c2FKW[ !2*AL١pJTkY) *U#xPZEE89uk[ 2Ϫ v0B daeB2 aQrY0FP}7 QN(ԒÂ`h0B+P(;P%gWFreB@"\\BW++5" \`FBrÉC\WRzp%Z6K[છ`#nrqI-{U7}sE\Wmz8!< \`m W Uj3q2 \`:ɥ:\Z{\T\&+t0BZJ;P%WB !C`prWV UآZ/ rUWhND@B:5[TkMq*EcW+k!YW(؄+ke(-|W+ٲXZઓ`wNjcϴmU7g֕+9MO+,y0BެZ-+Ti䀫'T P W  P=PWpC@pHU&\Z#+Ti qe 3m&pbDsҜ\i+4մF|hg){IFTw-cұb4 1=ߺSҡez=²B{c\!C1 Q}7 Qe#Rr0 (Ԕ) V,\\C;W+C4 \`+ʕĝZ{ U:D\Y )$\`%I0BZ+PE?J5|!zpZ6FvۛqMsv~qMٓuMeߒ\RR4gZdY9NNPB!^ˏ.[t ;7GDFͲ|s}z6h /,]OMzJonKo;o[`ȖZ~-!ny8W-E泿SEׄ]T6Ok9:>z0N?JL3޵Ʊ׿J&`$F>8H/.~B)R!WW6szw4|i,%N3K>hEm*cAwnm{gӆjoN7#yi?3`xao9v fUlqۿ__?_a7˷{47헹I͕iD{vvivz-ӆt jl'ևcj%T{Rv,`7zmz:teDWF/\BWzP(t,y$uLq1t5ڠ@iS퍇=_t`'נ `h'LG}`g6,W'I.xck\I/io;}])Ł҉{⏉bX&r S@/\&tUt 0P.G] ~1pW@(-t*$߄_g8vJBWO>9Avw p qjwm-(,^mڔWիW_l.1Q#ߎvPO>Q7Igd]lz6)A/:(f|=}]r8ޓoC;b )r^TN7'>㓃5цvoO>K YJKz~'}Ԯ^#SĽڍf~B(~\w|x0PU-~y<9Ui /||w,GXO+ &GC=~l|u 骭kEy5`d?ɱ%C5.t5Y[]!i_t_wN~~d-;[痭N|7{q>=p)yӓ.JE}B *䬩I9KEHWr=CMNthV՘Yc u*֪TjqqʹRVMU##G(W@chE鑤&]mޞ~9d8Mq#kYnj(`ɘ!gŷ}0ԼIzdUѬZMM%ij(%%VƠ uNF}f]Y0']zrNҩYJEWRHqu~0 m‚ڈ ~tsU>J $ڨj)oCX(pֺk3n2D@$XZ-RSsHFu ^-XBd32n: VA`fYt7(!Q!Sh;׆R H2P4\ H`9gbjb |4@sXmmVaD)QnA!VϾć b1 ±&6r'ʕ<P\ZB64GV֕5@7m0*A`cyc֣sV ȡ m "׎]SPPձ2L)S`q_1O-` tW jX9M ̆mBCl0S{4.qN +QPIw4 **ӫPm=8I(&` ΫmJ+Z;>J͐jPoB+~87(c)hduP BHPED&T "=+FQC(d9ANE΂G iB?%pB8 :)ٸuZ%@ io `Ʈ= pYi TLgvJAQā.(ʨq` fYR Rl m_BRFCƺmw3J1 ETcܳ.RVOcsϨ`[QOeKgQtʚ9&TYk|D d@og2l 5tU5gM23gr9_nf1#.UUq1Q,4(PlʐvH'"/ 3s?`ݸgz\o?UЭYgg{]`ACZ$,>&%AuPiPL.B[0uLF IW=$ B+ut2: Σ&4XhhVh3{B{jN\dUB5Y-l5̨tq,1h^{cIA'kԭ-cQx $nGe6Y,TG7?/XE90xaMR}Y+ QHk"]t8;{gyP'KSqEHl2obM=%DAԜrtcЖ!/fs7DR\G߅^10=:H H1 jcUe9%tmIJ X}t R(Wf)4D[nV Ê޲A3 'e#kq5α[jc:LMUO26ٗ ?AjDлƝF*zV,:fa!dURh3A78ˤB A3m'JZkqC-`8C:b:4I1FFЙ4 jݸVU.m i ovAVeky4U*qaV(m``nzqt~ 2y;pIr㱝Ղ c;5B'eKquh0JvB{'UCŪcԮaZS1Zk Q^Qzbh4vM ƴYoWEзUfĞL:s8ݐh!/Q[úb tyBܡf;kRʕT*!:!%, 3Puzv}q` nw+Aj)5OnFz7pAW,F/(q˜ -F}hFb>tcT,~ xCڨlJ%cT-،Ӳkز si6@:XV=ش=CɗMLDb2*X@6'3FT赛8|ƅ7WAPW bVqdi1`YVM@; Z&[Z =W&3 M F9`=KFD=ki0kNrlCɵa+V @&"0@.:  
Yc6ՔWNӥ!H0r(:fŠqߺ傃БdpMԅ]0q$d Jkwmt i\]wTg0B>x- cxklyދ/.6۽a)BY(ZgVam;Cz3Z%\GsH %Qњ5j][Dܜv+?|/z5-Vްo-j7o0Yv9ݼ~X,xm>?.6xכ۫/ Ih~1_Ƕ‹wju5isvu~j{~v*v`xƶCYN1X7/mI3KП+OH]Q;K';Mq v1N}c{\8D^@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $NqjIN 6, DZ]@|N ҨzN eމH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 t@^hx9N uj1N &PF/NboHH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 t@h__p#/ @@y &N'Vc $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@z2NϷۻz|l5no777 VvW+L !d\oٌ̥KhU\q hχ7c#ƥKZ g NLWKՃB@zpdt+zҏ1.bj%h;] ]YEYpiUt5в;v(yBW߄Hq0jAt5GdbjW;] ]=E 37.g~kx>c0bdWBWAv#<_0kC+N=Qv?ˋЊ<_ڬZxܡm5T~2-cJ{9_!a>;ga`cm,`wS"iݭoDIԢDX,:^VF|" FT5eFhUhZeknr1Lf8E!qËfK=?&c0T7 I8t|1 p‡ - رTW 4|2J@]**(B -.uGžb6~g:)Bl]7moa8_Vo jI9pV\3.l}vkZ=r2'KDE.D "3LdOL߃hSQp-o(T6xQw?GQM>,=m (%]!] QN+k[rd]9ҕe6MZ&i 5 jtBttЕٱM0M99]SǮNa?/=[B o]=ttܦhZDWSBtk R-BWMr>ttutŸPJnZCWWմt(- sCy gJBWVIti:zX&gPJx{A+t[ Ѫƫ+DiGWHW)šf%*Rkŝ˥޺fjzWM[ Fh?Ru4}4-&Z|R!I,Q,fQ~sH/SL{S봘AՊ耒Džv50c4/Njmب% %VNbaE09,~FZw|H# oS.%D iˊ.YG MlK0Qbhy}tDYڪ#$ʶ)+kl[ xBUϑnOh ֨+@KYCkR؎ΐ,:x(]Lk]!\EBWְ]CWvǦ}H}S~kN`?D+B)IAW6=U4 n/ NWװm>]]!]1%-S-+5tpj ]!Z%NW҈ΐ2ҴI]`ڣ.7m+D+]!]a KvMm]\I[CWV4]#]XFQ#b@Ҿ`RjI^omb@mMjJNӈϐQ|i5`cʲ;.)+ÄдC{2WWZհHin(TГ/B -Bt9BmD}_65tp5%m+D+H Q*ҕ1Z*kҞ^KE[ rtB;9GhbZDWEY53RdO,gB0'=ӮbW{TZ tU]sZe i]`Kuk =3'Ja::COj?Ա=t( JO~թ?'gVg+@it %]Ak-B5 \gJ,Bأ7n ٩W 44(h iZEjPMc5$Io\gPBKjZ3|og/5`/r|L߮m!2XNrRsw$x"L%ʤ|L0Ø2G }ՖVv]-jٽӈbph-|tCivqϢR1hyS-պwo;Ayd9#z}dBdm ǥ+"6hgI-J-iZcQs0r<>^Foקʑ /D a?͖isX&?bؘʼ}?hg *_WC6 "y6#c\V+?zҦ0qqTvJ+2JȜ򨵷LsB E)C4kM, z1橈$LO x/+ǤIK2F"Ǐ81(qsVS|ԖhIwj{b8h.W^cWlq6 buh^q ( Yw>u9=鯋'y hy]7k<[\mʹ00Fw4YNm)O=]G7M./4˝MW>^,CegPB;dmRy) Is'h ,O95[E-  phlm6s*1%Ahôb* ,_G$!).hؓ=폥A^q$1.t^&Q:+?ugѯ94 >  L\>edֵ3?дKGJ1(¢>3IVAGa F-:eU##Z=ͮc'&΢4 !Rig "y,#EM0 (OI7X!XAaRp<_=ލBV1W,LWC{:b8s q 7O\̇듅h/U+vv3]L ߫A߷-#U|(t2_=jU ˧q}H7ejq=XMCVZts*=']!̧.k^UZ,s0$d(!bsbktn@hA/ݨޯ}__=),6`;| ~ep"7`cO 8{8sL%:TJZqyCڏn r4Fj9XO] $yU^@Ig 7VD/ziO87ԕEyEَ߬[`/Fs/Z7^VoQgXC5`eT:*΄[. 0ҫ )Q\@{|n\\3ZK~5e/yOw:MeӼ"rO<*^p[8g,t{ʬf'EGBpLߜrf"r:! N6E{D/hFԲ>>eOo#GU6>rϪloh< CW) oJg(N c&8*]G 4b9=|w_h6q!;>I{(jL7nz lvG4%Ki?A* V # MxX5.bxw+')}ҁ '\o(cA a{gI~4JP.W 7g?j5/JI{(aqOKLO8IrH.5ZIE5MOE[z鯊;;m) Ήe9Q(]C ŃIR{g-Fh^ώ6;f>6ó>W?( ŝA8wc52-ѽ@w;D?}3k]te2y)O_G\x+}y p?߼|ֿөd+J nBf2@kSDJ1'Xr'j"Ύ2{ms|z?yH&Qla63/#QΖy _L5 Ʌ~'` V?>f|{4;xυrTa RWx%02iN AZrX9:h+k<wvɌ+JI|bYj%Q$=ZiJ9fyG~1sb8ǀքԠ cC+\XfrL`+ .OǛbR$m̮:::T ڦ͌4DJՖln@+D73$l3֌G_/>/Ӣ-;Ʈw`s_gt1Xr L9r_U]C4#֫ w!]s]A?FY"s O xzdWYY&0DѫQx'EYJWv($|H6ԡzh,2jDpuWǫNj9?Q'fY \YZw/Eɷ?]*,Cɵ+dHy볏6.6?mV>Q*Zُpc/uÇ]O^s輦= ސk3>ju|spiS7G6km8p^dk47!H$Q;KfyІ( n ~w\Nrr4`\9H"YKu92Υ ܺIc*͂,BFcBfki <CwI^|~N G奞 W`,З⣓޵q,ٿB"#V q7@&6)1HFV%S#2dj]uTwu3QL#Gqqzܨ^X:"mo|"JH3vPQ,X:]Dv<󠶴:J(q)C KΩ,K3%=*W<]w6@Pw$7oq4LJd:vf4ՒPPVF`$E'56":`H*}$K-FsDK?<Ɂ=YI=y[ߙnog_TаVQyd4)xaDMFg|L,0'!آHCcz]t&u1W.H5g72UZ$P,(,0,̸7q4,Phv4/>s&+([D> "$:eCff#x) BYdf V,"='k鵌h2v6sI'R`dLcA֤P6-Pc[3*V0L`yΑd"Yn ?$U 1Rx6.xTLL̐IH&&'AqL%%48Qs-akٍQ?J0ǂǡ-#"v!6AldL"'vR-<+5#>2C-טf*ɼ$TlyœP 6[iׁ8n# -u?f4253/N9 Y) 묣$21ђ. >J 9eVYjl#1r6Du.jG6^Z"SBuZʥY,ްd4 x*68KKdROb}$O_%HZu]^ d*RbCШI:dz&W]4-Ķ7<[kΞ2SckWVKTǂ2B $ 6F&Ar]7ĬQF%vh Mz҅kC]"[I,*!V0I{,F`KShsDDStYEfjW+֫Z0qfc1&k셦(E)DqpBȝR(k{]/nb[eepbPLˁd {A >FJ- OYZim L\3ҿju5{oa"e%EbBXDwf }JV!9>עrȃs7kߵL(eAuh'cF)B#UJJH2+-`paFUJi+0q69p聯yU KH9.Ѿm dE};sbogZ 7p(3܈%*96qlf}OGV`;Lԇwm2.,7 &/̨-Р̹xE P em̪A]F.f4,x)8IF-KBFgSdD*t1 Lȱ;R~H>rz:^5뗭ۻC|YfS?~m\Kn\%ML=̎Z$C hHm'U,S٣w1mc. 
0Y6ܘcp{ygdzd7#^{%$~bk]YdYihCz.>jdEks+`\f ;; #ّkmexCˮ{v8]빿цA;a#Fizs@U]+MQ;q>}K wT o3qye/\)#3 ,B`*i9LwrO2z%Ϛȶs,F9SLӘyɋԁ#ap׭LlL[vIWOB<y2Hy?t "_~va1+)Lyb Siwqtfa^~N:4\:2FSSkY\ц%Vo^;}N ua}u*j +U,MQDRԄ_NjhrVOfrQ7, ~oȃ^oPZ sn~zwEz%F|ъpQd\ 72rޠNJPҎk;Y7kmyv2&Wr\''p &brοWœ|@.Gvw1'j&n<1*zq8;* W~u=~?YA'-?pI_4W i^QS_$.fuz9Wӊ["5-8Aekp"6ߤLuI06AfErƖ5+)wNu.xoMK$X*?bo蠦n;2*Sަ༮ܻ-Z6diw}m4nA/(/+ ȴDFȬ %ǔA 'VAyd&fc|+#s$#iK-x}GA@=0!DSIB%dɢu$n,jֻ!Z"!U󴸜>Z[mM6n+'?:z1%7OkE^xXCj1J$*t4)@4uuЌbԑR:MЫA4oӏtUws`/Ld~o>hv'&Ύ߾W~YZFηnNd*fn2A0hejKb2=Y-3^Ҍa֖=$K %l= ȭ+n\ ]rԾZ~:P|1L#lpC\發F QT1$mhr0be te/<9/xDAD(:3"KX?{WFO@;,p3`|)zTJSc*}L1>TJS /<&e)*FMb}oTʾQ7*Fe8Rj;oftդād]x0chJ *9[c j29l",GԄ=̣ar,G 8˲K^qY &: ]i]PYNП(BL]q<;gMmu9L\@)#^dt3$<_DXZtUuf,rgг̔$(X$  SffS(Rՙ QjZ,IeK XRI( <'ǒy6-ˬ| Ai|+J,g%g$ .%s&XgS6 `dǢlҏ/I(]h*tB%43Q!ȹ Q9ALX@N Fj)u7:5%a4%Ea|Ic,%0#s&dIEHDCK IE~/־kc~VqnpUw A?2ޕrg f$3( Rp&,}BS=~-/?-/?4-/?-1ָl"iYViD-,Y"{ܒ셭&NuL';fn<+[(Dw?u];.z.#i 5M{]+yWPlVlcgVdtsˆR15Ὠkӥ9h*7*2\!*,Vg:70l3KOY+Xg/g0H&y4jkk 0$q0dBZ9Bđ@9RZ%XZ]ճ}(iirWՉ 8] >ڕ-oI3 ߦInu}ssݫ߇2kg}1癵|2z,j==TyRa~^H/;QyͦmmW'[B=~8){znw=䦿d*ن9f8a:|6̔b'VW¾B#[J{tX_%W?//;#!;6xX0}>]Fwŵ}>a>89A* As>XdfMZhRpCC$_8?*hֲ슳!@[MM& ߟ # 3+Ln/v옿bSym nt5 ?߼2J7ҕ䗛LSqAB||+>#Zl?*7c|p;ӟ7Z*>onҺhGf_iK5Z*|V:x ҅҅$EҽV*n_߶۰I^L9{wKچZ0/zxR7]Qٝ[/j~w9;ӛp)3oY PAү'f|W{=1T*74J1VQ+p%"d>$B0FY[-Ou` #zIȃ0OEC2qϳ hO18 2_Lr:pK(=ކ}\!}#UMV͙&}Wd!v1k7NJ^?t.D~:)s6LNv)ˬRFyg#ܔZQ>(VUAZ|뫯_s} {hɓoI0*~]Evp}VBH q" aUCx)zhA4Jf8XcStQK΃pj_4i#GAm0Z>x&D *(Ld,&%L^WEFg p2 qRr6s9x'=LYl}.}ףigX*xJgơM v{\6w]z `^WjPKk{NmwgZ6ԲIn/zkx-z^jYƣцw7G=62?Ȼ>|Ƚ+.9ymS[ܵ9hsZPI W[YܪmBS\m4$Y^H޽*q@u{=@&$JhYpML3}^j}IA';}@sEΖGQ%DV`Rk.>J1u(g.+n 3R: yb!XOXc}6s͊>&+]$S:i/^^Xw@_ 1_brnR{>C=At%ȶ!EnLQQzZHwGYF_Q:%;RxPNcuj ɛkgb(BVBI VA8H9h11SO֨#H"o>׃j &LVKBɤFΌ.,bI))h[ &CR)闍 F%+\l;. E'1haZ21Ek&jm۳6UkWt}&EWdYn-ҪGS ì1@]4!CȊ-隄,b)jf!!eM#$=ކ>l'/x,7U#BFqtNKd?{Wܸ BOnCʺncm> E!)>Y (Q)Q$(-&*d~&Se P$"k`@*q}d8Mg\p^($E=ZҔ+f:tay?G|qq< %_gc\..qѴAG U>I5uCFH:G Hlqq++xw/[3bʤy(~|G {PC94ߗH-lOW t&q\{k׍qX*ns_Ťj>oAE 1֔E-Fmo'nۖ;w(n[F*InVSf$\ ALnK*Q^4MNX@bsNxLhMhƩLzJj9V3Dikb v)H[+8|-Ҏfjoś?㾯 -`D(&Iv2ޘPI{/Q<1C*&$B\SCLnZL Rܩ1xFS~#q!ֺ]Vs9꣥ry6VያYjp fL frިZvg3 j{85sp5gz0!O^˺ux;gWth}O_E&S7h0^p6{rn~CJCCRBzq 4Lr/0za:S9iaВuy9*\cnڹ1.e?DM=E?~\czq[9ʇ꼙R y\,g g؊WwvّZH rя|׼)!~wg T;kvJeA&̷w"EݕTrt™2Q\@EYm:(w]7gFnXhq**Qzu1Ta"7SB1p6(QXpEKOD.R"u3^S*&/0QdoE7NM0SU"ۑgyJW;XR*(:"?|s?V 7˷tZ]I$f=+dC\ugu&hى2eCg+t ?7M\ QZmO]q4L-ګ9hE" $hp:Rd 9C-ZV2!AkMR[hEi [g&{ q8]@Q'M.\-IL3VKBI4e)4("&%`ې4bl$Mha+yfZ`x`hVVPKT\  N\柚}  NZ#e.#fI5<[Wz5FMޟ?.َ[| ;oS[B-[gp("D Sb\)i9pI& PZYxyhg)!W$VΣ/EDye iH9 8 m .#4:Ƥ~nz[X"M(Yz')EM[v]8xa'ِg/ D<Ύwav3T"f_~9Я?yw܌:#;n<Յ_pEJ%纘o뫸aS?߿:4!3f8g>|\i7;}?VDyqoshp{?tbȓ^&'_ [_|Zw(*d*"rZ[8Ѱ?#sv"!(HANG=;w)>t8D!F?p zn2/> &?'''~(>t74oJuB4Lv[8gwh|9ɯkwzHi˯ ]=~7-v;Wv _ݢ8Kd4P*tɵ"֕F N0o<I?:ŻQNvpW%~werD4BP/O=pR\["닋Z؊Ab9͌Xt q .*cZXʽ΢aߌnIU5caBM~&Gush-l>~a8ܸ*}2:_$ebOTukBA\;{~NDN`8WoT1Oin9!u{Yn&q8~fxMXMx;f4OD[d_l~zA3oOfsLDտ|fPZ'C Ob V:0MqBY4 D~ u7:ޱS6g@+ζ۵S*@\@΂:")浈 x+TrN8!Ԃ^#9Ч-ט1޸姅9Z/UΧ4QCbh*SF\MBШ!`99 Iolh3e7)Ĩ5q:h. h@Ifrc(脇jEE4D :#ݢrm̹jK2F.9&q! 
*QRAH,1 Dx02iВ*h+<~GÎZ{NECZ%dUoSds@f2&gƀN%eLDf s)xZDS?URKM+FbtvICZ'tHNF[{>%hVEU}T}芪=| ֟*ptƋA)q5(QZjW %=DڪA:߹XVCb'+ H#5**$ X%6azؑ_.0m[1}ٷ;(c)d~[KuBYr'8(]UXU%1W]ErIdpf`٩!QSO;,;'1ާeaA|H÷P^P jBȚXj^f9f]LVi0$:KS"Ki2ɛ%y.t$3$Z~~ ~gzpw7+{Vެ~޵{zm)zr_1xg,FtpVQAu* ʄ.*+,A^*׳%Xd#oSӟ\ !hK@p(Mg CT*5_'Y|o3-e3W /:/PQ( XeX 2AhEN!寢 Z ,a1#ʟx\OlN,sFR,5*yt$VT pBWKq^4@74y PVCE(j4RPp-WAB[Rmf,z,y57\SF;^+|gh0'M.kL6NK$-"Rܓ,-(O84EgA) UbI^&~Ϸcn*.v],usrsncGҧ*6oƹLkčTK7 cdq_?lm8ݼݕ|?Ţ`9S M-kR!V* FaG{i2WVes8ot|T ݘ5g$o.w}ԫg4O-OG|<#/<ꀃkt74ϣ*@lGVѶzҊWc疡±;Y5kWyzktv~&DHWox4K{'Wɤe\|-xg/⹇~TDj\oZGW٫A7RHV{qub7]>OW4hh%(CjoqI8秫a}XXƵ`YH 7GIڮWכ^WI5ݜ(]nגי4ZD'֌qm"&O ܤ݁K)(} FUBƚ6` +]B ۫{@K`K.WI %{*AQe XuAB!S {.0i]MŧL&SZ4ktl 7-IozՉdqr vr vXq=6ӻ1-[n޵C&7 L!&Ȏ.NwD0d۾7?>Yi|jLGng؁W8}`nC.Ol-z9oYGbr0Z%:t)2j/5ʳ'J ^@Rp*{=Ow_ƹF5H}@ьϫK7^j\FjOm~n686قiܟ ,lgp#6HÁp;p_̣`ײg i*kbW54exq 3Dz !EeOⱭ7_8`ޙ $~Us9fiLhAȎru (y5*0iRc ~gʍ]ѱ*TF (#?M6-_ťÊp!h[CoqҶ>g1*mt `֪\d0}ꭱœ*2_dNA.9N;\N>%en -Oɕ 4T1Rj2For4guT{:yfeZmoG@TmL^jMٻ"UɒU&ZNM0ǦNf-.NBυu}~1.ږnp??<ݛ!܏Y2g*|w~̸.<ɝSޥgum~OMۻ={x~AW)⾍f? BZvaǓpVyP4moyt="˸DxjywpQ/O=` 3,u*i+/M^chqsBg %:o}{;kN]~jҀTT^M")ϝw1!^F"gO( Cm@+9E~FzR/DDȓ5$LfE5JIE0 1Z 5|`)!QYfk/}6ÊP|tQM9)(AːZ5~%^\c Sm!'i1k܏ikn,z2g]3'cE˷fWoƜwAck,:np?.-432#`%D<;f_ R'o j1(e:f“AiO)zrҩ0!b~m?[=kя-w.:V)<Z2_?3{yXnf-r.t0tjѐSr #),V;TP T}gT(( ,z,, kMMb5m`LEƩ$'8g\CKtI}mMnަ!N-_E[v{>r Vb.:::B**7Cl"ȷBlijUB^:Q% F#?C|~CCj12vN);e.h=TE Wo7F22-vCח>o,Cg!{2~L}7~neVf;-4ux耔2Rl[03ŵqAQv ~@Z/L BEجTK"㬆DvY{Q{IKrmbgQ{%! ðl&Pxthȓߦk6 3M|g!{[v{;IzKAdC4+U}ۼ:A@$¡Щ)g racR8'YioM>0`Tʐex'Y@ym%{[^7΁"?BKkJ4j4_Kik(E% YjbtwXT gh MRmuX4&g"DW"W"RJ D)bt!zW.kGU/$ %T5bB]U lu(B$- {Re &22Z3 so]$ҤmhFwp^(<]K~ÿ>@*F+CaTJ?7y{hϛDkmֺdч/.2:00ry)8%VR77)8gpwa\޵>m$OwW4\w{Nvě$|*<%F%);u E$%QDCpt϶ >"^(l7_>y;XvA>]!@5FGdVeD+$1yXqM;Qx7OS jC,_t!iepw ]vXo;yR޲rϴt`آ3I -Vl^ G|g%ӫ@U=UÞڪaO 㛩aNx%Q^$@KRZ5M‡)p"Ag>T~Nͥ 4 ue)$5:T݇?-ؑԻh-uu."i_:c#㔉3ڑH:*]a_4LhQ|K%8%-!-i!oݸ:~{&z͓=hx2̝~V<`v@1_f0MY)Hb@Ns? YNùw `F͑8r+V o1w_F 8dX}jꙀ?~c*f=a0I}P Q^]cUQZNLC40F:_ZI[ XygAboWt#k$j/<ĶpTM륪7^bd\oᇋF 78w5y|SŰ/AF>?iA%RԟxX|RSY!ys}VE:NgλAe?=ynh-~XqB[-mFی[DJ2Cڴm轎=M)uCg6yR%V=D*=Z%o&--51iꖖgYZ>4B-X&m3GOxJ*!c+95q gE\BQ,Wo.Ns="Aͅ{& &n^tc0&OxQMk61j} !;b8, آq ɋmKuUkuVALW *M)f'^j&#Ѥ~&Ӄ/~E)/y M0}8sME'Ǒ+/?~8j/\R-O32E8PwbH@PΪUxBf/z5>|qpp!UJέUŴØJ7̃V+ԇoC{͙_P /j~#;pZ+ܠx"4UKAa6̋]Mn&w'EWŧ_ft1<0lgYsAy}dRdmJǥ+#6hgIbO ö@wo= Զdn&ẁ0%0_E3M3'h 8+'bÁO/n@\11%Ah'BG=zhzZc8tOd t?fsrDKQZe&KK < AE|Q5;`'Z3`QER`.KZY|0xfv8}@k/`M8$nԈལfkPD~y:pA)mX-d1q .DX.EE ͛wn\6A =BYS0L)aIO1i"M.)Wo!Hr|tֲiA�?4awc1`h%̀{~mQ=6sxre=]_NeCo^PĈ*[e,U ޒi Yw@R aa-+# 5Qs宎u6i#M/lpnQJ6ZWiA6 x1z'gyWLǃoBW'〳ٳMSD;na|VOa.bL ?\ >|[h8{?n&Z6‹A"BN mg[szpt~MHϘetφad1#c nqx[p3Ӗye-yYM^=ГWTW4ҵ>3sy8xX09k &hIsUbUP-E0aevǻu"wIX g=\^oۘ[I\߰n67vx>|ar4T3ZwO[ ^WCцku0u005ѿk[lλՍ1/:ۇHߍFӅ9HX\6O)xW+(OV/CD*9($!U9M3!tz\Ke(-y(mʙG:[-W`<:-ƧGyEG>k}V7Y_mI_/hz!oŵ -) x?SSߵr`߮]TOªP?u 6n˛*!NlP>j킺MHT$"Y-)d W- ѣ*9]V*\;mǬrֽ+OAT)(;h$BD' 흢r>[E*i N k(|? &8u3`h;f,$':B pc1*p@M ]9C%щMntJ;V:"I1q:5o#H{ߝ{>H J8;aԢR.#% YN hI$ř58iy$ʨN(6,g Ɠd| 8,i)DOYكEhc"|[3هLDLIp8"YZ Obl 5wd}9ώ%N./%JA5?=o\m6X,MNvKo>`)>?jd&]B]ѥ8th)݈Y(DHß3&(/|D "\nt IW*6a-72z/–=zJ%RTsgn0:*G? 
> "M C|)|~;6aW?ƫ~+vpxstx;#?| + 'YsLsʅ&fH~ݲ8 V5^4djmKLyKU!gRVж}ll:y-Wp|vNsܞu1z4_O~]/9!rnY~h΃ ql8 Nր<տ:$ϡ,&dzJ^'m<.]ݔ0FOT P Ad")!Ȩ9[e- ruwʟϙ= s$ZCJɒJjP2#"QY /v0鞷nHGtKۯ+-']azOj,5]{h8?"l>)4|*|MPensOd){9gF]j_Hoyv[-jLpZ;3QL) S~ν𑧞<A2T:x%ԙN}K=G#SG~'Y#02rkLpF$QN` Jcr #[587q,?^]fTݮݫ.wNȍ&-2Am:fg()sXus)zlK2KGnsob KΣdtvS~R]핷j^*YrB͟nptoQtSx{ӳ|Kƫx;Z m9oʷ^-< '/{mLPașdTmJ$qΫ͡MEQ~$*yo5PH<s$w7)@sIK딱*KIznl $IټI93⩣|bi˴҄_>G!1Yrw'L60Y2^kzzպ9|(0tk6NDWS5!,WxkBb1Kls~'*W+<ܴa0; B ';Nn{ף~GVYgY% z'\2"sfFS,.3YZIJ0´%՝\j\&fv䏅5.8' X`2\~=ܼeȁ[{vivGQuĤkkq3n`o[I4;'!ʙH*@*%0s#hb;>֜"Y`y:6zWS!YZPNѿF2iNP`O'KWS,c,eW_%\H  ̸>Ju*p5GR޻WWWoyY^ \,%>fWߟݓ|]{3zM P[kC%vu!T:0NHXx*g _7ܣB-Y32Tj>.%Ϧ@eH1MNͼD0&@QB[DD }SI{Sw^^IQbjgu2ovc5Iilz +?zd^ӡo(̳Wîٗ^y{_ |^5kǾ~5m*AXdX"@{=*Jy#Z%V~{lHX#j5L J*ΒOVYOlDF\$/bL$5sՆkㄡ>PBDR%Yic`#m1q;\OF:y<:w%PV5ŭ~ņ;hѢNY$- 2(QՉy##^@G,&er9&N#y.I2FGMp g\F"FSTP\[i )Rf gf HC^(HJrJPALTTNҬ6:$3V o)H6K,Rpc؉sIĢSH֣stN ?". th 6@uFn@%V*jtLsr.9p! :1RA*-s TzbTВ:`z9vDsl}N3kbZ82𧻃GqpvK``?xcrHr{IE %'fOMwuMB!;jJQ;A]Ii`c8ZBI9x0oۂ10DhTHCKK)FD§j [AXN(m:J s.pd<N43Rh`CdWB ' @?JCER ἖jc%7ONOWEtF!bÒP%-Tkl J$#l4r |=sCx\ zJ*4o-5ӗ<]oZ_qeŮd߀o;I@]}F :)$j);D%r| J~aŝ¶vIr1c Z[$jH X6ma#O[k"ﴶ.at0TF#!Y 1* RxD#)"1\F~%A8O"|-o՛FhM6KvL>/:Y] V;B#!,jl˘Ga`G`!0jq%u*h5 84BkM̨hQ-1,%! i;R-r_qWSMqqŮ.rBuv3i{j &E!{t?0%Ͷ1EHԉfü7W29fV}5|ml{Z%Az3Z#'Z7Gd;ЦQ'ٌR"ӛ7M@8 tǠow{ǿv5~Nv{oMM_ 1JW**EH:FD?#FX9NGCD8DHܰq':,4Ei$*GSs#aoU`XryZd%[qZo; YwηWOS$(tz%q̒)@X +ƞ+kb"EN8D` MUڪ?tI'b][ }D=B4 %ԆO iepF}~b;1p1O,?4st>az8_M)]'wsh9`Ϙ_*먍_|٤&,Fo͸wcrM 6R0]=,0Og^^п%nЇIo8߃+5N~/Lӑ ғ ޻ x%.c? | >+btF_]Yo <9.ypdR20hR $6$ -Oz;I3[o˭~^ޟ$_$|&f:'u]J]VIrfi0rf]~")'IE2!(L'Z_|5mf>U( /H,S*ynxwwX<[{[Ƈhfmg(vi(I8ؗ:9zp9fS4f8~no_=5nN( :4\8kAgG!H'(-Sc [:=V Ql=N;ro!% 3Bs4 q[#gW,OIxՖgՈ|{N t.'7} *wUSWY1^}&*Ԣu:_hiQXu>GWZOhI-%RrSzF3vzLHiC;.'tK9Cu Q*D4$sϱE,RLLR62 U( (}UJlB Oc*5 <5kv[Fg4Y?M={Lj9l:*GO M*HN~rPp.YFa{JMm{TxGDJNcRɜ2J ! Lq=t=fJH7 C]3D^B&pLڎpB[1\P_PVбu;t?ִW!O9WW6J+Hií`2n#YN(B)-!:Pđlts,qcJA(%F/(ta5|j#k?, M@eQÅɜJfJ:EIŲ_ lR}o%~܎(lMۿd E~<Ê5,2BS_f6Y@SYZlaFn\6l͐Qz /_ѼYqm4!TEvڨC$Xҹ b1X 1L?bcmKQ?ظ="fdn:ˑWɮsXQ[Vj.͠q-4hC*:7/!BMAXgd׫R +P 0,1:LU;=ݜ.{Jja 9PH$ZX.*l&j[ȾlX2I0fb1?L X2sc7"_bJ~S_avF4 !k"d+1^ϩ.G2M3V Fy # UKlڗ1/WO0[w;V=<=W/9 hyZZ=;A*?7̾u&b9:Μ4%}))UAEd 8)* .'׀̫)jA'E4Cid !*""OBFE0*\:4C3[14nkRT%Qf,&.b ($<>XNgkw%e4zF_k rjw3P0L9E=7暤i0/k?Y$aūN=ksӟ*|󪶶&o^Wn_f"nFG%=:?WaV&1O8?ݣ0(*XJ.ыe)9 /&Blzիm{^7.k>Å?=b)s3ʗ[Ƣc\WjX7 Œ9K˭.#:F ˰d悔WXlzWzKц"h`k@rp)BSϘ8P=t8iZ+Iߪ>|Ԗz/׷uxP: )K>O.p(Vk(ۦ߆-*R}V{Xw^m*jд<}c{{TNy`2r~;YTۚUk{abLG3H"[z7^xWaɼF4Fe^H#Y2JStqXp>72Ex9ƕ~=.<̋CW`T+,D&2""&ZH0| [PD4 !:5x]koˑ+&]- "? ${IY ߷jH(ذ,͞gA:ZW6vy-f;C2ے}fUaLʼn t9 i,SҗV2\rq=lσJ4* s"FH)ٳV\tVBSD,5jDuvBy=6fz{H *w{I9#S!c.3y![%KA)X$)  Oܘ~d_>E#Gu5vt"H l8!ID$u5R( H4TҗRcѱ% mU_d=}#qK?L7mIuu"M MN\ׄoDy&ZC'kl.jyp:\5\k*QÕa0PpN'T[I@v$Wus6`SP\W*sQXJ.KG5 \= >7~;6 ly=| U VY}vCo|ms7ь)m?t~ z:HwoAm>ړ"kX\  xb[ͪvtmHᴫnb[6kiݿ]6~u=wZ|GOwo}>2?2} ^}nZzGVhnioݜ[|m' \0V kls!!jiÝf = cNsi#8i=?( }a;3!lJ+f]ML3Sq;qNvNF <&T{Kd $+19("JV%>CۉRk>JsbE@D(n*F)vڋ[Z~kzuh1Zħ)Ug>iieRM_BZ $-Jcd⍊.NFu)2 :5"7IE)fbE %yPRZ"PYq<* Q3LgRv20ZJ&u2 ;H O!HQYֹ_ou98>Yq1ל~KًcVRKԂN"wW_p_Mj)w<1P}799FW+6o49SUҰ^Y b6<`BT&YX[$,KK1g*R`=""h "mNusT|A{83ƅIƱ\hF. 
o7.?f~HtZ|Yu47ՠj$T@ IBbr1"nJk!]R$ᙵ FcgZd=u, o{> &CRmJQdKm=*X1-};gcb{cYfdQ%Ys*B!'YyBr p'2,t5j,^hc} B 5CЊ2[䚔Y4" G(|ÇT4"D:[{<·QOeooR&b #&2"̈02Ȉ$D͘/6tZBD[ Ʃ¿6m8<$Br3VmcLVB<RɆ[!F["{Gw: /.6\1w n~<#cJONtk|.b͛rcKb{-R*>M$k(Os5!d^!_0!͐K ~5EKcm޶YZu?e]OcJ&TAjr\ajeCh5z.Bi s(TCWWWCWF:G:N}G%Hm6mEYMj&L>8a/zw L~?0/WMMT;ᢑlĥ]9;>zܚ"&iD-4hˡ4z3i گ6մeNdg)nxDL 8KxV;-G'Y~}1Mu>o@JT5o/ yYof0N}ݪ@k)޴[G/Hm~,~W7a+]&sUk)B6Sf7fyzݧ,dd8W)XWRHpE)D+J9*K "B]ftEpe5[Vΐ[-lEt]葮 kOtEh5 OFΑ(ǡѕUDWWCWVPj9wCWjϩWLPՓAapՉK֝A(Pâ+u]ziugep@p@!c'-7q9hc #@iuY0iGQ<^M~ONQ A%0lwMp5 To囓*9rOt}.A>TedeXo I4u9y&ߖ? hr2?縜|3W_WYrzʐ:2VZ [JZiIThBjomZ_L2u}s/JT$'*v;w@Uj%7i8 7^Lެ.3/ބtVvg+mB۬vyE]Ūqj1F(u3(>lEtAj }<A)BFzvBU]ZNWRΐt"RuNWT]ZC+B9^ F)fT\(gКFVq^Mk%i̵4=;ah`BHgHq _̍FTTzZ^=i\81g |H𶑄.YcMY4HqV&Mq2¯ӹϡѬ*v"p(2QGhmtB)G?]Ӗ"v/+˫R$rtE(h#]f+]K&tP:6ҕJU_BTCWZ j>B)FwS/m>Aap5C ' =`ۻЕة1*+l+ B\ȡb3+a5V"BZP-HW/BWA :yٓCZƪ1 C+BiHWgHW2"Bd5tpՋ5<CHWHW`U'̐bIf })م6ڂӊ?ʊhG$JBӄֈ494u^LOuLvdz:A+5tăD6l-Sc5pQRHF)$Fע.tPx2rJɪ"B//"/$"zҸΐQ5m`[=yDah%:]J5n#]9j:%UCWT]Z;xBIt%]e*+| j+(tE( J2-|RK&.b\r.r㬦=;,N^0=;BiPZi i*Dt7^qi=-fOvV%-M2tvIbYPG晰(WB-Ճ O@QL VӁ-VSV%F:C++\ =u˃>[B)s+ǥd5h9 n=thC+BHW ]=^6?=]tu\`'.wSU< В\ʎtus]]`=yN>4<#]#])gy~`+X=tp+DF,͝7<5Q޺|NLVm~|NiD+Oqhsյ_w놓^㧟~Z vپ778[Z5߶IV~xoC(?Utj+&DO>LA/pOr~²mht(߷C6B1 Zǫ.2H$`s&%źܷ{ܶZj'p,=Yf?(nfj5o&{ot~Tp?=B*J.H}cn?-a qR_!@9Ci~~PWDIyn> lz2셂K)4 5O"p2̃4<] xn"RG·/5$)wM~{MN~[0p7,_7p7xsT,Vɂ4*gZ*`'@љ)X'C,`9B'6>$bHl*%]rG6^w̌|L=趇em)a4h v5EDIRtMHҾ?ܙ4[`]iӅ;1 (cm6Ljb2X0.90d51P Tct̒a2a2zӸMdW!VbhS ]}jj]ӓ1 **z.h5r(#jKzX̘im0fh[WW(:6ܸHyZ !g@BE3gO lzDA\-D\{m]#:TŚZ' CX/@!&ʻLqe(% ESi!{AicmMr9 YWeQ\Zj3`{d;;Ƴ0npѡ hJl@7}J"m*|ݻѡ%{U,%\bNqMݑ(WJ5 *&n@6'0 84؄ d d࠸V?3]ҴV2:$LX^p[L [Wѻ;dWv`@j:7CAwjG 2Pư)0} 14ku@ k&L7KTڴVAx`f#aaڡ?%0ֲ488)ԙ=H`=>*@A&:ĬA0ཹᬞ-teWYΨRX6[gAN|]V#EΡqݫ.Rz(eaՑj&[ %DgL[ ` d!*DbbІmiĈ\+1Ɛ׋TMY22he(V%uʲc6999h>q8HM10B}~8`oŇ~/ۚ\/?z{U0] +?z7w fLf JcGT^Z3TeW$FHVB8Z*3d`ayR@ q0_*XX芺aFR|&QL&zZgt^۰`T-Nr3`^RX"ɚLq.Aݺ& q':CpXPyKvd*֝6Pne1P; f///7WqUZw } *>Lb+tmHmz,ԥf| EȋU}L!Ŗ|+&& 8ꦟ5r1 j3`Q֚U-RhBVD;v@ n z=jC [jw ,hKٯ}A:G nhM=<,3Ȧ td%N2XtY$ fzaCd $CA &qÍngyش KI`+ :LB`vyXgo>?FvHWY MS(5g֊o3= {iz4{5ED7BZfF[HN.` ྠg&/.Hn >о 6b\yo]O`r?ܦ>ę/ #xB+4z!F(Eqm> >PmZ{%ecg+0Še0ڽAY AfUg${8nX0# |/vts]^`72Vsua[uQ˵2+|Kg2)l!&sp |EB!?]-,lp}~=4)[>@-rwAwuU dAeA w 6uX _b=per_O+4%̢LJKFd5a=]GCX1c&قn-;. Ay\ڇUkRWɇҔ"j9+EЬ#W ˭0p@B; ,wW*{ PsH}fAAxwKB{9>ŭWW7GgJ-ݵug'p3{%Bg&sa's>w__vݷAvw[wy77?L|1?l_W7nn]ycl|Mלe7. >|cZ>c???|'OG7/\zy;aWG w>2b/#k3R7Bsf;Idh3I(> >7|kII MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&@䶔V$qQP+N2 dEjH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 tI W-%ZQLHJHF{I Ld4 tI +7Y5 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@:$$$F$ţOYhЬISLg$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MiH@$&4 I MNGx>ڿ^}w)GMw6F=wkW.o]ⰥsNpIvfK@2E .BpMjvCt%v޸VJpt%(Y=g +fJz ] dm ܕ^fv~> o'mȟywZ7??_hh^`r~ &;c:c~>W_)!Hf34-pM M Z>z,(}R>Ar^]}982}w].{Pd/?\}].Qo^}j koFvW\X~ ˫vb=>{4z7b<(."5Ul!?<_m,-rP>oW~~{n׼VnTq2:[Y^c"gV 1~Wg= .\--9Rc2T6{MsIۜ;N B7|WISt1/z= 6{2Ge`JY|ӿy.s z`uwUD/|Sӽ-~Kjޟ-xCf8$Un޾ac27a?*'+l9qtn&-n]K3]*(A>.pe㾽1ӡF2 n"ME{HH5%>4Trz‚7ĴfCv;LJ3LS2vbluGVѩ7†GX^eZo`Nڒ=@u[E%^ +h@Dq1"btH*e5@, H (bì"bU=LG eZk5f,`ZFL&Z'+L<ؼH5E;4j| ˤoM7b*|Yr@5RV[2\٬ί)'2Kx< [»g]&UPkktvog!7V 0A.D6gr38  uU:So3/dH>D6 !e)Yo=M{/3-s#0. cG|~I=gox6]O׶cwZdXlϓn\-ɖl=횶%(j״M4m+B3,& ūTK3K ٘P;S%F7[;qf ޴:@9˶-瀜AC쳒sU,ڴ($: #;;hsN4uRa4:LQך:ey䉍+sL(QmGs+:2,"1rbrX+B"Iykl%>&+?JZf1gЭU䣣 zkcDb`Һ9gQ`C 3|cc|,NHogNλ@2JYG˕Yu[j{Uh d )x\aA>zD팈P Q0jH%;Rj,7ЃQia1(jxTs`.0 J" i-{+ARײ&vE l GZU~O*ǭo\خt{+q? 
ב[{<2*ՑKSJz> D"o1!XHU 4: |ˎ-B"YNJH1 N9ťoW12#O[r ze"Tؚ8ۑ=3“%41"7wq(oJ7+N᰸w`%eJE) .bL)XGC-/u(TC2'ZK(II͂Y$R:r!"'UKmmy,Zx(j˖Q[t1M=>p$Xo$0c@ p0_Xn #@)ÁXZȅ4*X 1+ k|@N@@>r i[g;VF}q [ED2";D(-Sͩr`)ƂqG``0 HVQ+ l: Ą3[XA`Ir,i$53m[g;"^_hj XΝi^P\-pNyK4. !o4(-@ ňCOǂax“Xvlr(

02 1'ZgG1 6|^ї9XɢX3swV>s*y /'}rsG)b.u ($VVe 8܏ʑv}d =!"} ؇"Q"!#K9 Q Ƶ5q+u eꞱ@v48%zH%jM~&TM~͗˓Vk6K5϶MhM JI_w9_u_ VӍBA].^ݨ3_O,^TTJb`۔ ߽ͤqeKO66Je!٠LfL )&z jxФC^yG o&*Dl"Arq] FKbtFοXwUH}N㏽>9xp"cR,0gr@J|2{?M ּUqg5~OJ5)u34U@c!P'$Lهy9'WRa⩤ :Ʉ~3$zU8<\ -wǛa%|l.? #hVSʠ*nz1^o_tst:4OrK BTбpnO8U6U|:/m%`S)uv?qtNԫQ&з*LY/Lrߟ?{۶"?݋ ~{v"qcK$I3$%Ke[%S4ExΜ qr5-wוDhq}l!08ͣ'? |ߤ)\I3]WEmiD C퍇ۓw@ FEs'w7_#|.^// m觉~~<@?9wi@Sg1:uT#XL-at9x DZf{?}u8m4}暫IdjN3Լɚjz^i_x+V1}vonX]bci@m<ζ>z`ߌdmb`PPȒD@PޤLaziD<' TǬ3i>X,bZzBP  ѐu*:́'sU0 M`d˴ĴF.lE$Z!=/ivckm[rW-vzUD{ҐC}j{PWd~y5M>Ws\R~JwACKg)uAaC->w,csXƺѱ ^hJ{!cV:P4Mj@۱i0R ۺ<;f#5'g j&3r۪ˢIͫڷy|77.P\GnƓϻzs5#nv E;ʼ:7va "]|=XKΪ q+o٫ړuXnK>c+9uՙ: W=ȋay=O>cMaa xo͑ 5?ZYd4nPỢTE؇"i,>n|9<)__c|WG?-oE,eJ/=gT0 ?%+34 >n`"] XY{0f*(,B$aQd Nk]TC(u+4-fG`|>7CL?\ T3ntQHfU0[XF2HOgCPLf6,Vocl~%ͽ%kO9\I;Wؕߏ跃;H{oMng(=]øI)GCYZ:2xlJoV.V ;UmհY56xu`BP ϑTCe>7wpx˝jhgO)"XKXzH) qY)g?|}F>kMbm܋T<TN7ܾfBBM[ۜa#ʕ]877?jކGkB9sr0PR'h $;@ol*UbZKa+‰5sMBR\ BRnX'Ne:O؋%hQA)IN#P*!9@fצA"˥'K4B^aQ?3E: v-:eUa!)mgQhJH< nNhk8gg7jcAATBLbۆ]F`E&;w/St\Ja.W#tܻM_.%I/;|;׉T]ͦ4{nFv|+ߨVVQ.pX|LtEqAl戋Mq-2gppUΪZ ߎ+W OwOF^4?qWwWױv0-uLbGr$/a1< gzB lяm:B aENߚMqwpW..؉o4:6?`s6>:)E NĻ"~)^QQ:‚:%gZqyÙNӍ|6&wsmkh-# $n:mp5v7~ Qz,FJ(zJpg[Wm<,O zIx7V?Rq,<j@ k" _^nҩ8^C7Y7DD;P1X3M#.-%R:_oz>\,*{Q=wQOUGg>2W#ZUImU7R^7*N]q!D)Rx!F _EsLrf"rzB@avp hؔVPǻ Kq!d'aE _ c&8*\ͭ8;5ز K6l[AowQzV !(` ~b k-E0k3MJc,-&a*GT݉* IDȜ] :*08(悸#`gisf삧2U^{Q 6 p,&/)B{G1 6LxPI&[ڭ!14XIB<bXgn6e.8 >i Φ dxłq=@ 7"OYH9b!+ L\RAkEÌL̿Of]q& ]s}%{j{fp1_ ZXu,ʺPoi"!my?t2'ae""[zmd>u2C]6}(6MK&3Odv 2-IЄVyYQk:g*jFXB 6IT-/!:V,SCpiT<,=cYQ~#Ր7uS_/5:b /%n$m$y \~*6{}>Ø50xr{q!Kb-*ԟjm9;IՓ\ڣ(X2ڛ(˾4GAt9 QN9JZ#Jպ/th:]JM@W_ ]N==#!NW6֮6kqj#tAj3LttEv"27Yo tE@WHWLqDWUD-]+D@W'HW\IM _ۢ ]!ZycPB:A$)zDWؒU7thY Qr6)ҕVTKˁ\wj[F˦DEv)p ڨjf //◟߾.&Y}jѼi#KJK* j/1Kŷ~nehkBӈVwg(`"MKq7k*V^L(EaxɻQ<*M(M4Wr4ή}wĺr\8S,a9l5i`=޳YcԺ`!pb~k5;6w߼yűͤR8Jb. n303M|_=1z*Q %}Q dJ!jP OP)Z'Z*zCW]!}LNW oFN\?.7+zwhUNrIND1hUAD7_]N=;#FNW9S=vFh?Rf(bj۩j*zDW؈5NB -W]+D)@W'HW2#M [ RBt ] ]q+[>kWBGϙ }+ID QvD;Eh{f7jy Sa>4l44R.wS K7Bc4L.z\;TED(Z\gyY_ܠY'fKri{V&r+7R1`mǹ ɕů/uw$Wnǡwʫ<9Z?B|;G 7C*Wz$SKa h+ȕZ^\ %rureЁ˕sF7Q Q˕PʕE|FH+ <\7\ҮXG+\\ȕІtV:FDd7LI/i{ߠhֽQ۱ ax|Ds'4^ CK_W ~7'N7ow{?û3\$x@>z#_you?|9uĔMʵn !4Rׯ> _?OG "dfϨG5Akr1Q'CoH2䃓^͟^IPo~ct殍ͼF_6r#qgsR&v%Y{#_GV=Ezvȕa8ZtQUTH؏a榡tH\}7r;=*d?gj.x[wܟ85x$;A]Fٹu9hoF~. a_4mܽ燓_\HvU-ox~>Wg-э?>7yz[]~m[mz-SPM?tqqYmS#_:(V߮ޠqbss{y|x#{'vj[!EΫ;Û>+]r~˖O7;|r&ݟc|3'd>sO7Gʛ{|rךq߿S_e_]~Q?a|na>U 'ʯw̯kgt˖y xY6Tm-/͛d?˱%C5.}59]M>GV妳x}H,eHw|7kgd>~C{7Vᄎþ~KɛtQ*kzfprN'R_(Qyhf&L:4j̬х:kU qʹRoЬO幟a[gkg,rg?/44SJܺD &X,LoI]eY;j͉!f+-Š I˭&k1J6ׂif]^u9E˞[iѵhin ˷o߿K;Wj6swJkTMw '嚬GR=FIEvwn׈fxf8:TP{))ϕ'"Z#)wwZd]mc6J\5:[')K"LEt(ιcH.M~}4 4fz^bDfZA# !'z]AF\T)oj۫TگT)΢x*sr>L&sf!)\rE)1ؽt0{y9xy۶; EeyaD)Qn!VϾJWDmluXBs+}`1'˺W5Dž%̆=ƺ2^џ]-*`"{*HJYlXkYh̡ m "׎]SpPUycL)`w>ap\̧]6Ajb4)* #  ʄW$ QAQ)tN૒2 VXPc9l&!l6@_Go[pB]:?೮ ++#q3Fnͷ.@Q}Z PDdEINbTy>MFݚa,VXC7a̿:<_n)ԈKPI!0DVx8fdcFx)VZ!h2ݙRpHq Ls$e中j L{YR Rl ej@!N) ecT]T̬Vr".QVOd瞑,f^n!G%ʦ,$2%V#5":J(k ` e1aI iD< =Iۍ ' \ˉ3RUYdbLB*b0F!M%:/lv==ve=,X >n}-l/S%4M'$ >:%!upХ:\Ko:@GV*$_{HVT6H (yG 1/+,J茸P4I"iyM,CB6F2]K1گ%$(AM{Me :YnmG-8rK_*,Yߨ"bΩFmTB] !Hb"ҷÚc~z}z.~g. O㐋=`d9^.wPY` ]jN9mЖa/Z@mDD c@ azu5Ҙ n+c^0%rJh F;ےXuu ++\36X۫gt aroێ39fnqIubl*'J[kqLjBמEw64IN`FCP$[wRJRNnUՠ{9`۷ZFBk` Q"E+>%KPC$%2zn|~ K6^*dSP0u36y 5I"#l\F `_;;,Fv ݚ<@%8O)'GCoׄ fL=]$pj3bO&Vj7LJȀ-z9 úb樇 |yB1C[_s"^'%\IagZxF_!n6f{vnI4X_C+gXduwUd}V*z*@%J_* wB\p=-`5D}]0Ʀ OOnW*&W''.\29iXwz<°2p!v*B J1: 5R0s 6{jRCU8U|H$Ygҗ "tZҸe,ܳFrXw{U jo:j OU䀻ЫP ZC =,Nae%#:>U@2|.@ܰ|cQ2;=zJ88R]a$:^=@p!`~3Xf6b(J \UT]so`Ju >Nx 2Kin? 
00ﳦ Eױx/'ci*c2~]@T> }eւӮ)gi21RT*[XZ@o*sB㋺D2pՋ2 Ot~||ռ,IJ{U^:'N7k ;K|A,\ ͙g6Wy˵c2 am^{88VӋ(!^q63tp ]!ZbʂͻUw"\mBW=](!ҕXS6 L.j P*7SٻXWu-{/6@3}x ޿Vh  v(ݻ[Е'ҡy%?|:Vpg]+S[TF] ]I|;\J2|#Ag J-BWV+@!:@R 7]{ ]!\B;]!Jä+&$699w(6Юz)F UY:DIFJw{vR|4*PR(ecZ|+3~7h6P>BTo;^&2{|uӁ牫*W%y W߄Dar5I9ث8t)D_BD)A!:kgU jD3th"]fKp_3;CWw&~_ܟtDWHW^|3펮w+CWV}+D)BW0?43!Ֆt%Z0t%=;t5+Kt%CϙpkY#P7esvh;0*iA?af0=K (:Z컆,WGٟ^MDq*N[~ڧXnﭽwVoWGBKELI6MkeM{*'ٵdxx&uu"@[S3RA^j̎Zkّ-?eUIGף~?AG*d+^m(L3}ʢOqNM I18{:㎵vkO˳d\OwVMmC- * ô쐳%q]qfߝ5@&g5!<9+l|g :^%J/R3tp3]+D@i\[JIO!B;tpMg+D ޽dcma+&bEݡiTghx֊}iD8Ҵve.xk"hRu]tPn^#!3÷mai5[]g5 fOÈnP9)7?; e0)Oվ:UjG~_{rBUEsBA>=߳W5 O G0Z'K`Ve[ =7ߚ_qd,uEk <e=5v/E1p(77T>]d2X%BMϗb\l98K<|0:.gߍOe e J!z"9!UrUNH'IkUɔ4Wl rqX*MUmynD D B ,ylB8)+a W\:@pI>2X2 +,)1p dM borG.H'*D>nwi8d2a`-*ˬ*1&7'Ur\{lJoOK2 v>a =8Pկ\OHoO)Sܭ kmCߛ°w/\,74GCM t*NE3z۰cƼ"G1mS[ɬן0gҔk^N!MMO8^s%X ez^){uK~ ų'?Mg|L'kN(^"{3cq1`cQ\"'̹ .J&/p^tDJO3̟p@|Nh߳@IzrlW\D=0Sx:Բ?pMm*&f;Ĕ/d̢ڄ̘7Eg/͊nڭ8zd3󤫓 ]cY&`Uuʰfq\.VeWT b!a7.AX#LÌ }!iFߡPP|1L0x4v<U}UYIJ_\eا}/}FWq}''P|nk՚Vz/4sfA ٳȰ&4fM ;_~}*N]jӺ9Nji<<\/mEe}o|Ϲ7:鷆&-kɪ}YN.OEh*lͯŪ:6;zMKjFa|\U{tJO%#b t@7Cɧ*㯌 Knj<Az-AZg\taQ|7G_8IyѯaAR7eg*2nw^#.㙙q[݃pq˅Am[no@Atμ+g{r3+D>k\e]Uf)1"LRŤ/SU) '"޵q$B[ #{,p^` YFO[D2$eG^_ 2IQPY69鮪Uu=bLm3ا%¯ϗ^ib!ÛUL3nlɩxW¦:e]fwQ?:BiHt3.&D d:!4$N)]bU,^" oP#xkag׃Ӿ\C (Ǟ ;/t-mi6wG~AV7zȸ?7팧 _+{R1= X^7Vˢl<%/<Qx4t.|;U1ci?ɘ-#΃М{c6,A:0ʰ$l  DlV!.f'0rk ?9y>r0pCqԂhdn6C\k%ÃϦc~>'J}&sm>%ˠMS g«m#YZ1#`7cS0D mPRsƞ1mX@c-_ Q3 8Bc&*)ryJpddRY3) ›.B+!rcJ2MgV-"ow6qj52@oi$ u#[/]rEbIExR.j$B,Ff= ,Sfc#$bhWAj+4w)q!X hՔΒ$N&&z[yȢjWZ΁ ^ ]9ŗ`AYIΈ@0FR %X@`FtZ])9,(jfBbF!yQ tsDStYGΣ5ʣ&w.5|F! s QN,e\&[I Ӂ>ý==%ӤG~k~.U zs3KT l.)A-}X(!}o{֫| g)k%eV*9Yy)\e8@ػl99؈nccGY~&Ēt-.  "jR A锴TV"p019+u+$'E뒣 ;Rk1:bΫݱ)O~2>' *gp1pBĨ@l, Y >y5`΢gVwl~ Ko6dGz zNVcƷ91Z:~d_gzyIJ] 6<@;lYY)|7yVF}V}]xO\X!;w'?6AL 3 42oa+,O:z;z;_əS&R3MJG ZFLB`Qv}w_AmbB+fl*QRÜF!Vf~^iC_y:wNF d@L-m+cgS Q))  DFH:{  :T&ԁ6ϖƒB`hD>\ۑ*m ~ފ>Fqym'!V*fL54!W˯m8{&}a2I(8plR,:,:ea,wuʑg3@x)pH)OS)P*iAm.i4-fݱ,j7<(4MD`!'J 03OXJO9e)䕰NQ6Zڶza&& -sP`sEcB K<9bd.ʈ"Ҟ`p&sޥaj\;&Y!\VH,9Gse81,&Yz^|!ΩZXg@X6BQh @ʐ7ʞZfcsc"*YA1Gc"z8A%FH)>7n#JV\t y@K=? 
\.Sk<1(|c1'>~;q.bqKߖ^#l2`mvr;6vMw^c-MM4%q|r0hTBOvq!Q\˜fa6 Vty,>==aۋ!MWS;~ozu 5V !=˫uk ߥ`K9M乵w+]Hi--Lgub0Js|NѪ^tWYyqa]@rI[WcضɐLK"Ů7-=3nI+F7&8Pgcv$74]۳6t_k(9.0J|4^C``Jn I"%pV7o+5]Mz籫7E-S>"]M hNޥ/rru&ewDwY_$]匮wCj^Kσ1ݘHH7 m|K]|y֢PnvEʴǟ] ~OuNg>\Xk$7A^жIn (= 1m@d3}r%Re.-Pdۓk@ `SZPNj)&#MIeL,L@(u!ÞIsq5+CsFG'Z?4k8ѱ cn:2(^z8iSq} ǗQsy_srFK^р 鬲y% 0BU2 4WQcy^:' %hS'A0W%0ȓV/ss)Nh27`=4 cp]M^q33{ǣ[X<7ޡf\sfԦT%[s6)ł28x9E9qq)v4t5-3Hky(d dTiA0 Re #H.ĵ[&j$j{f9t<'s2?E4=EHN8r񐍌)#2,g=k}rTN0Mf]1F ]3}%渕PWc sc#-?Y,b*-r~ͥɟ\YAi] v:*q+V^{%<$gxL2ݦ:ƻcK?| gN$Ejb}_A)d&4!: XfJ9Iz*&霹VY--ζY=r ,mwiJ<F7W> =h}fԮlhO(>Ltl0uT+~{wߵ}a84܁9طy줸-8oݾzpW *뵾G.>୫ߜ&%XQކߛϊ؎aVWqIkX!{҂ͧm=+;| Xg7Wt{Y2L*0VU!JyNt*AoFӝtYS𙎗Gt,J;)NOtPɲWlEd0cp6A% 'S鏚Z)j0QaUgRa$msɒ0K:K[grR BR}K:/Qcy(KZCLm@!fVrygCm*FjחOM.S:Źf>> gԥ?W5Q֨<9m6( 8) J DuD.A;9dEP=€k&^~{K uJ3)]~uӟn2MEIhOn7`N>G~xCrz3I.>/Zva(qͭ-͈/M.(I#+OH|VoYVKhTok0t>' ek"蓫#ơZ͆D|8;{+lK{] ~l.տ޼M;o ;hN7p z׹iZ6J;axYΈـqp;,Zt\ [쟿T?8XQ)B0hɹG菵W}ajs%rf=m)h W.h[3~YZWԺ~ҺjZs8e͓{gpaS/z^5~Iuus^z(5vx·~VVifrywV?ek+݁se%I!1Yŗb03X,/;75DI&?EIv$d@Z>EJL XC$]*Xq>4Qk Gt>'QX犒bDA:"B)(`Xq| O%uT{j]R=W0l֛\o(1Ib$iyǧfyc zZNbIJg0NEڅ@Gl~8.9ɔ"Z6J:KatpIDa8;#Sg Y˷Y v<Ƌbօ_kY_!ʘU`'8R :HccGm簣5[,efɉyʻE1ov+_v|Dsb +,# 9׊ȅ-d" -H%c$XSC$HAԺ~(u>UIT\sR9z0^I536sif\2NB3rᠸZŇϾi9ϫx1[|7t~6[|MWO9JbɖJHȂb iJIj@W,B/5pi VRaQ|j *m"Qd%[8 (F+"Kf찘/:NemۘڣWZVRN>EYyN s^qJ3;~"g)׬XMhc} TEG̖&e dhX8ږZfTg}c>l0c9Mc_}ʈԘidđZ_lA't)rFRG>pp'VYpñ" BԔI!UgPbd3!~d#-f#iP*2#29OsͬT^tyэ8$AS'f56 (wڈL5+X*dI{K"%Jy{ža38w3> 2I/ n~|GoY|l;_βɏsgKu]ZkAS*&](:Sw]YP>]i!ob'ZΔ@ƥ1'WhTP-m!Ǣ7mxG{3v4 5X}3C ]\H 1ZGDQ5Jm] yhcM1v}ؕpoԿ-QY!TmZ|Xim[XJjC (8~ZuΎX=ƯOƖNk)+ ]UBW}骢tb'CWG/_ EDOW}d\xdZCWC{-Jtu꣗ZFCW ]U*ݦ#]=b҂DW}NtUњUEiGu͓i@tU;=bJءUE rgrCWJ;3+lCW.&hu骢4c0,jNn(gMglMfIN\tM̀DW_ Eɿbsh9ㇳ줛x%,C`h/>FtL8p 4]Q1~48ep1yYd),J*5yz7jt1 d1wX@r vcHJdԽNo[\/\n#~I% W:Ksy+1?LVovĥmD}⮛M?gNOl'{G^=e]- ,UT :!L1J jK%# +\=QXZwQXQQ>QhA عU'Pj]NWv#]=C"CFb8YBnEr'Е3vEgúv/W,5ś7;u'3ö́e)zhT-sDgStQ ~L3uyOzuN\n_5?Nu)-^uF`JS!TNdlN@K& {Gy[7 ǫtu|ǿ/W7a;7H>zCIqwn|x$wwwq|/in1_nOGm!ez8CQ-~tc.ƳTu]Zmu?@ٶbo:iR^;~vK봡\=jlZs6 $])(Y4GTPDJ%I_wƆܹtm38Gjv[Ul4י;~N_x%M/ڜ?>KfϬd9T\KZ|X-Vx9W |g9>#7ݏ>^zRReN):z*RQS@qP|:nɯ2,V$|~rj5ﱖunΰ~ؓ_1W97b#)S.aMvDn_oZmjuooG-}_+-^Vl6}.:uwOŇk_{)m6닽iHXܪ:byp[_~9{d]{ao(R}G]G/ B[-.*z~O]zSK-Uxsp>_6G?=I_{6 |dGv[k=n7groG8ΑaX9{ nhkGeZpvwh,1Ĭe[:ĝŋViY[ߤ}A/joHԑ&)8#؋ؽLН48dL4(% E(JL XѕH>C2ZICm1㊒bDA:"BK/c1tz={=UAG:Ҟ+K6M3o(1[JL8RbK!,$ςOg9͂<6P6Ē4`ٵ 1wGяyN}S(:O^ujb!7IE3 iY\B*!!X8Ҷ^{$CĖ'G?NӋNtTd {gC-@0:$j)vcɳstu xQ\̺k-뫔=D $P#GjA`il Î_/ `Y~ji,\˭GV\W%$*;_ʴtۯq?`垱--ds h+D&d)l< )HilA*K$bG'%D 0D)D(os2Ҡ,|aj͌fvZ6SЌ\8(.0t*Cڼyd*nol4.8csĒ-@,8ԀX^Xk0ǣ!KEmO 0@FlN+= Z/usbvB̾v38mc6#k vg-bOAV޳ȜWGn t5+ sS>,Xi8Bfe(GeIYD(QRьlȯuPiʕˆTFƌH##KBb:Hc5:S?Ȃ5'I2H a8kc%&T )DRd#-f#iP*ֺÌMGW}lf%k̋nő' 85!MGFdYq.R!M[)(Q#/ /Ylj||xp N}IpcGG6g?^c}ƿr1G繳ʥuإջy0h1d97_ VPwRq#Q'vTR[1O @€`2yeB.H r@E$j3$E$ّ> R"BJgZ6+CU='M.Ij?dUyH)KPr@ QHm8LAcf=}Fn&^y$[T0s L]!H10(WQgͺ_ulW+uZq2/⦲Ľ/uwjvPJD\'2>_e09 X9[;GuGݒ.t濫ZiXtbayy DP7@ :-~ ڨ<QJpw.Hup=&6sM:@dk̲SY(sݛ;}7st~qi͵:_U~6xxcr*Oz>XڀgDu-h.[^R! ^Y<7/xr.kdΘR"Kƕ^ ))cFyУ~ 6]2Eo R2;G$(/HOXP1{v%y2축Hn<+i-u5[Vͽ zN"?Ka i6R8BGk'',>n5&Rr})Ub*QK10!$_iBZ5:^ jAAݾ =,>3f; BZHM-M ]:T./ԑqDRę\}e`7#&O*4[ .yE=(DL)i poΨmT%T`"Dۧp7}B>0}ʊ?&c^q\ZysߕKV}xFݐ$qõiH@RMmJghp1Ju hAlyJ/*D@g'ղ[쮣MQ.S>s KxfC1U$8σvVY^A=I`\ *H@H \_`Тg? 
"^onw ߃ȞRB<3p.FY;X~b!14a:OP[hc£U/o 0N{,(H[ڝ3&J}|6oP&m Km.xf(fa_g߻ .'ԍsJt<L<"EeG#YLa|rgcih:B ,k)wyIb-ga6O _a+\!t06޻ϯJ@0FV>:xTcۼMߟׯܸ$Qy>*'P3'Z,BEq|~>ś7*'̥1Ls$2]'EOz;;3w/|ͨ |kR^(XaevMRJuG\NEvg)2NjWg!@1Yg/.ʔ gq%Ę|3/?.unx3v\/W'(6)w >pU fuwTzuFaE;0Ճ7ܚ6;lo{> v˞Pxm:Mrh'X|FlQϨο3=6sbr5 ?4'֟X;/}x|D/kj90'U/{Az[pO sD6_|m~}kaW]YU[#onOޡT0Rxw;S.?ŵϝO W}67R) ,oid1e:9Jʱ20'&UJ7woURяͦՎ>y7]-b|JxIjRjw Pjޑ{wc޶ǼUT21MtO-Z).v5l2}<Cm:_QgHmL&FEE P@"0O4"x"1#l3ث=%_/QOj=8|SfEc-Ti.yB9Q(4F9m4*gw@׋+0|_6ZFMI~q˪ϸ-)IC KP+n:_ϨXOz«U9Lnxo`ԓ ݏ(ګ<\Aq|QR k}F_LK-97ԁFMm-^kB \- 6m^'nlxA^]e0 {K4MΏZ-ci|1;oh,H(㍴qWֺÃWcq9;_\r-f(E LJOjJPcSğ'jFqup4%"I;N .#D0ekcRsFlN] w9U!ćoP./܅PWe,V+HT@MrR s|o^""1Q.HVd|U/a7'O+܈N!Jc87@J'Ԥɀacc m@~/k_ݏ~]h+ó/bD6ԵI9zX|I ԗߩx9siVL{(39ZPƺM#:RD\ N5K4ը;W^\Q&.2 fr] 竸ؔg}[w3%3fh̪`6XF2Hϓ!(&mR}Zl|R_0]Vm o?~2 ˼zҡbIb2}8hD GR8K<+IŸ^h5카Z {6%a>ok˝LKDKRZ5!ڄ;_o>Ax?Zm5پ 煠S_̅SZe ݸͦB˳=Ę7AD"gd,~b|YʽXDiajМ+!hUG1:*"ׁDfu3لk'ޚuDmwbΗm˻dOV}` ),!.;djnnT:F}&9 ( 3.˼ߐ`LxRu΀ksmh ҒQA!E>@\߮}/2u}u HӡSJ-zŠYfɍVOCyu:AO[Mted0 ^(by ;-eN8TrJQROj8d'!2p(JF i(tpgq;庯9n:9&.@@WDDSKa` #FGz\m7'l*0%OAhôb*P}5:TZ(C3w_~lpSh2idif7%j kJi|$Ze=#9~i!g `ٵ0,Y3ȿAE '5M : WB6Z@1|`!-pI:^+9|ߨ좋SwMEwn#iB*-YG/U6㌧U\2d) ,2o\ٓ3X.sw4hTqxDfm':4ScFJeH >ib 8fsrBKQZe KK 3† D N#@DD˳*9 8 u*08sA\3㴋9M3'T*F{^TBQ8,anԈལfXDws5:?v`}[Nۂ3z)L].[:O^k'%o#xsA&^/׮rH},kVBZfVP褐`@Ef0޵,X^D]Kt)<Lg)%CogwcD˫ 7DHYt\4xMCGfj^{!+hnhdD}jUE(5 9}aځԠQ ʕFQ1j9qn >j!T:"Z:MI":Oɽ Gݗ x2f< '|[94`y%$ #6˳?/wSq/ړO#xeX4zz3$=ܕߟqz\jQiTšr1sAq R@VrV-k,L̒Amh,'/ps蓫#\&MuiwzASbX^N2hnmנex͕ 5Z;pn2w/(&]вN=Y6yX|Mbn˓Yb_O=3SRW@0d 6+V{zB*3TWp + XRq2 eTR+HR){tՕ0rrB 6dkOj1Trޫ種^jM.Ӱrd߭*?]ic%Nw/xV70CYRGHzjrF/zX3ؽQ(wx R;.oGi!x.G:,묱Qv)PB v/OD=ȕzF!J+U:36/-Ğ̅7{O_٨% ޗ%V Nbie09,N>nU?ɏΕ,s!:F2vB RDr?ЋԚHe,A(Ni dkNFj 鼺zJ[!)+$XBrR{ǫ+Ұ^]=CuebV8$W1;x \ 2QWV*`e8km1&@u&\>W/e2F@cpĕAtf[ZnIcArJƱGgoB"Oi^Ƿ~~ß8h ilV4C׵t9+߻+ $hY/> MUVkL8VNm1hcy2,ˢilx=ڔKWFF&fO,90{-,qq7)^('"sʣ2SdKggH .J$cvbϋW1j}D`!'gPۗ&kRKaH 'h 8,C}E+V\3IMuc]Xaܭ94,>  L󐨏A0/h4ߗ:\eRhX+,3#k.dtINht)2Բ &΢4 !Rig "yQ,#EM0jb 0s#0)Ύ@l]xP΁-"I)~|K/irz}9˨ZdF?n>Y>vv6Mfхc,wUy(4kUZYE`zbq>U޹ۋ&ԷVimaF_׏9xup՞cc_3q MQy#qެz XƸՋ'a:L72͊Er$r8pPLo]H0ϊELi?2%;X5|[*37/ Y-67Q (2DdL0'sjl( PR+6o8wizΧ3Xޟ-k"@s`=u]"Vy&m'[cO0mJoQKQܤTYLƫdF6@~uBQU cpF {77JǨ8gXC65u빟 !\fW#Tz¾CtdnaV$}Q^@~N\bs E>bEeש]2A/hXgnR$ FlW*9͎E.2pB@z-9)+G'< CW) 'aE 7{r1`V͝J O~bF e >-ܓ`Mdi5`Q⾚-D0腖̳RDJc,-&*8IUp-"@D,L0XXqI+KFό.4U,GtO6yANJ;aV(L#OS坕Jgf#9Rzch9<8Bh9jcAM4s,C2Q- Hku2 az勭 vቿ5zOOdS}Q:*uH!/M2@OۥgO}y C%%&Z)CTQgiFtṬP%9[$e-mfbgs^__a|7բSU3h#u E~7֢{7m]S_ͯhkwA9a5ykհs{MZQ5]׿ k[4v,b֬XW })eLj]MV}2?bt Xp>Q4*E Q*6* )Pjf^>at "kxi;3q!bb$ͦQ3`!ùdD @ٻ~f|GkB9sr0L)e@nXdeQ1j9qn >j!T:"Z:MI":hOA 3@C[Pwot=%eu+Մ`3~hrj Ѷ@DuWtn[Lt.P:vْ8SJfyІV^u"@$hL,gM$2xeo)r))\Kd),"+!d`$PO1&dndZi3k)FΗ"hrMCwNk8 ; M&ZuJ[wkVw{2q?b>T+yФ5q2i͍oujr,GZv6`f\:'sJJi'P4Rt*?{ƒ,< `9ua:%Iq&Y_.X(eKv>9%vwb}ɅaSN[hZI(F `5"kD5镣*bBk@JU&Zek6f@d U3?BY@I ʐkϐ8H1*jT< ٦Ja\?Q?e 2D:ZYȑZ49:45VfWTX7vS`#粸lJZ,.ӯ -&lZoUwոEs{FxOR*R8A"!HTY |vABJa]QXH`rʲeJfnƵ%)aBrQ@KQ8`x ֖8-c;6BWe $6\"-O.rsl;|8ta8f7n9JbȖK̊be*z0R2sV[3EA2VV:P͏{!KP1J%TVG>±/R H[/ugŎQ1j7Zmjjvg#*@!'/IJ R"Gn ?k=IÈ% jj.j4 (:)rlk2A$j+1rPM.8",Aj"6ZDllEY" @(.B4Yy0q$BTO.xd$#"zih%|N,\%+"L#i NT[fzM#YRsu6%E..vqwM9J0.ke raQG \6!䨡A]|x.8nV|> &}zeޅܛ |௸q"DՏN۞m|;6(ׅ#Yu UlRٛttI`RR}!SG!=gBylсsʡDTR +^G-r%h[3%l'[;Yգ;~&ږPCg߼׿c*}VMgNO'҂j<;#R]>Bǯ.!`skmY%Œ6ip#D A! 
@nj<HNKfNm!\&"ƾUOy4@42] 7x+!Չ w(ԳA;v:Q%x.|R1bҺt> vZ"aF!D߶姰|O'}VI c"exaQgC8%A"q)ɳAsxW;63^4~[[jKݦMw{AҽαGGAGH8yW1q7N_Ǥu4LJfWW}_=Og?ҼWER*v:x!].]]9 %'uϩz?Hw7>eާeGQ_c8 3 1>jQ#/)@ZcP|畇QBDéEYȳvs?RKV&5("-HIcbNbstb)"%/ Jldԕ ,H4T1$pћ`7jle )l#xā@yHpprCfݡo{Otj^cHl|M/gӴvP51=/sn i=f H3N[PI0]#c>x>_˲y=pP},I }gtK50_2b-| }Ik1k)BqTB()-~hդ;F6:on,Uڬ4rv >xqB?a~h'\JFCk<PQ8n:%,-ޙn^ȝƬ*]7 5=t/wm_;zqnyqf{fٝOs1j>dc#o69gwl[Yj0"K} tt{m]4?5/ڱ]A'XaLۉ2mmg'6_aSQNlѐuN ? t``H.8ӧܞ+v $oR|:1ڠFeJk·VPT{d 6ayz7X5V.Q6^ۋW-` SkJmMV=ݲgal&kEv"ZD/^v2y5apF ӿQʤL90Pb]JTD YP0CIE_6-]|^,d"D0"㌀R1l&MV Jpn3B [l i^d@ק%?;5+IxK!AZmN2yGGd!CЩ6*)gIԁ`cRqN"!%d5"c@2d3IȾU!&n&΁ԁCskJ4j4_KA$muRTPx-m)yE$xڣ) 67f(#ۆPכ3 DXʘpeD*a>2 Qʡ,\UhZLnuzotN:&cX숲+RB)k]TB&UH*$ H:vF:6T {w ]~~VMH0mKrv HhE"v;MAuQY'&k=i镬Z,PEFd66DFii0d 88HS7"g8-*5 _KGOr_|e7r}LrN*tgm >a,^h@N*"^y( J,-]6/y1O>7V &:{f]&pd[88tN] QvP&QD5Q|9Xv1|%Pƺ :Jg*fdʩRg2CFb@5F!2!)(X*}VI#b*2FTi =B焐 o7xfNc֌6MJ<ON>'>N`{ {jԙ/ `P2zc0LojB=t'Jo{qiO=)wzyAnڅ1ƺҚs`2%M*b"ap7/9n{^pwu,S펠'|-o{?K_˒ٷ) 2T%.:KU )āP(TLo"Թƽa I GtHJi#&I)`d#N2J1eSTbx\nvH>]~Ŀ EsEMTb*/$ݓf&#VG[SRw>﹤(Z2#^o $w{Sԭe/k[L~>ΓW&˚OuIfSz<2dRZHd6zN Oĭ6u?fbDrdW^$c4KzJl\Xx튗4l/Or7. j-v@^ *T\.d/'ݖܢf Dcyl*U!qóDtgpVUJa"Lt2DQ;UE'2ڊY}3;bX"%  8|YJjd\JMr⹴A"lPgU,z;Y`oߙm ζ_vobߒt;'ίo??%b\Ad\\nZ~"ҏ:@\)f5 k Hq:1t\JF\"F~-]_UKjz~'Ҕ]gٛ |)D(0uLc7>9/M2r~Y^\ lؑ bՏN7ucyC`PLroӤ֚cT:7b1q@O:.',_W} dg>z3,[ m^OIwQ7'G¾0+-Gz沒r8R 8.Lвxihx}ū2e}eS)eԇ\0L'Rf,]#j䪔u-:UiO"&%-`2\gv`/q !0ՊSHjcwR)Sh5+y3Br}3oF,׌ˡTW+ǭr-yWlkWnWP:wlqu8I+юwEre35IqJmY͔j#X ^rsU/j_z{Z WԈ=;3fp]VpEj:H#WB8eC"^7+ȵ̴+R7Tѻ:D\Iɥ k'$\+R+qE*quRRz) 6$VpEj+%`W+זּ32)fg #fR^6Lmn-p-bھ+L\+R+1M*1}iF}iF'0 MsΦ*=xe}׆m'zZt<Φylm o5m+vb3ѻ}'װ⪟ZTM6=pG\=9-Nל1 HwdJj1 H. 5t\ʡ-:Yp5+VR4+t+"ZʵTF\4cnm!'ŧN)>s˝Vn#݆[4gX hW&RiR1M* 1 W-z%l}7[NY$qbM*eX5l{ܴCc$w0S+?T)HL;me7&.]ڌZܾڵ 7|qӰ 88-U;զ?tTZ?:Y0PpEiW$4+Rk+RF\" WzBrnWVkTq8!{EK : 06t\ʡMq\-1oؿwOW=Tj6`qآޛxK\\ HaC#W EC`)L3"VWѻ:D\)c zW"J+R;|S9qk8ـp3鏜u)}miώ{ !3 IC4\[%ba\3K-v1]bǤ7AKRLd٠e{. 9$XfBkX+N!jN!trt )ܘLB'ػfp7ÒZe+Ri숫CĕYW2aF+uͼȅZqk{qu'̧1Ϩ^Eatdq/''bh׀uV*]8o?(Ӭ`s_S_Hg+Z}wYV'Xp(/M砆?tJW/W899O*ērtqs)}/oy8^^a,vONB1 B^pջ O[EByi7uxG# n{P.4?;OӟB6}of} BRRhW 7Ny蕑,%qăT8䜥'/]R1%azi}w˛K4G߆+5E2PPY.UxbjWh[Y}RdQDRqS<#Fףr^wEB,dMhm1'feلPvmlD"5gǙJrVTa{Zl-*A"kUrBBjw .RB)PᘋR2JIƜP+U@2˚y1xFa6E:T} 9^dKjrت̪,.̵t W0VGiI{e [zHB6X0 dm\'Sh4xbmR6c(SJܲ2"JZsp4%)fUzǪ1\<xrV?ACnq1 yXӜW&DudP8BjsA#h=S,XGMbV+rbh[<\/u^/xoXk|-SC*|;#fi8ףӤ |fpKZ]b~)bL䤡1&PT6zIa8I #N&}r;l[xպ^v٥ nUAӮdz|ou˃4$M G1^*t7olJ?t,*t9B \UFL}tL Ük=̗9(XX9{PK $ 9̼*c>xmBL04Ż6:,zOy LQ}JA&ԭTx3$nm }[US<U#*Ɲ(@p%g_VA';=n*BHvxy2W}LW=F-B&Ȉ0%RG{w90x%@_>LGf| $$AC)yY9h vscA<K^>^ Bٗj.D[&L-ַ;ioo4K`. R mr`=A;+[| y5+"ͻ2j\-:r&Rh0fU:9 6I\G4IvPp$ro1͢Qj[Q)+֚Bs$<4j$x31ie:Oݭa#[TfĖT,2% <%f]жC*H4r;XL] >4&tB)E#6}5>H̦yaA,+(]Hڈ^bЎQԠ{x hNmU;3Fn/J@ZxzPJQZ%TT/Q1W8-lvw t5<5JS!*Z>@MtkAڊ^Vq:iQw>t" àe Mf|2).$=4%aFwz|dt(=jMXOQp /U\ВFn5 …nކ`Al*\73,H1ЗC3f^xЬrAp$=8D KzR@ŕ0Z 4gy['t]gF3%8F N}TQzm~і{qr 8 KʺektW{:<ԭi*a?|"y(Ea:0wNfI=7o<{ L|bb#)6&˜.AM9紣?_.ltvUݬz~}m4v7gz^_R3?r!m˫7RLjt/嬽޽Ytʊ8}kغ9܈_Bٮsz2g(Pz2a|HNjx.$Tɩ|BF2=$ЪGԆ@r>|ٜ$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I N}I /x"@ ?=p\FZ{0J~i&~$('8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@_om@OpMhC<$& 9 tI$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nl(?$c& DpKzyI B 'N0 y+$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nqt@nNY~ uTNW{6c5qj??]҅㢫]6TD9"`khPFtutwڏ+X hNWRj+0zDtECQW@+e8v"*2] ]"#+X hNW$ʢ2w. luƒ-<]tݕ;_N'3lJ8[]#Q2aVOrZPs4wd'D! 
c졭T Uyf7HߡTA邏ǧ$Ogw_ȓ }Ogɟ4@`wҷU@S>dɿQ; ;؝S,Ts8-x+kU /X>l#ȒpAF<N~Qꁟ|zI=D\C>6y4SxIaOk V\gC_9\}w(0AHx]i+}Eg",/=ZY6o^h:I췹.ɭHeJ|;eS?nÉk&wqt& ('-^fAE1{e@;geN- {-ҌS츉"t x$W!V@0HzvA2i2l0]&l\g/2KAQᎪMMA @).>,JF2J\2N:$jOhe*i0w3VjјZ)o0t j}f2*{;I÷FB0Cd.3 :Ԓexegtp-)Ov hE-Qe J_!]q&-,l7JqB K:s,&qn+H?fɥ]2%S$Va `CBseCebTpibp{O1a'hfUQ~Md (anzjj(ː!dX iȇx`a JF{2 n4G sqCHўrTp _oY1JɨkSA\R~`f&OMP>k7lJVȞzR g,&ia;^>8<>O#\oDsN,sG Ue\KC* ؐ`ok̜òhgB` s{7 /!WUCbiFa/^u 3mWMryiPjA=x]{X3Rc⊱>`=1=UȐrm3bȯ@,OGlH>Xq:s6s=f9۟0 "Stgb fs[T=y hKpEZ! p`S{CZ( z3(:'!Ё'ACЏ?29B3|qEt8sE%N_e %k4;-a{,qk]穿J s^ͼ߂4Azqj΍wGWqh?_۾?/6Wۈ' 6n㱲շ*zxǂ"*wdfLU|(Y$p!y\B|3"AϮGc*hʍ1yN8Qp_W #$OѶ38Qu9gqrl9ٵb"Ʌ-i{Pw qȢ54xsmxGsFrCyF]4ފkֵŠOJ[㓾荂h"D=z nD+uj6٥e)KNNQy-'2A1tFz[vap[)zwӛV*R_7j"C!noDg诉ih[4RG)w`U4J&̳6]A&)ݮ "&ͥWP.Я_e)Z׆Ξ]?׏CY{S- ?/.G'x$&.2뙏O_]. ik'czU9U\9UzUr0.AqfqQ$FE=0ՎH#H"nsϏ..?/r}fջ~]~$ "|Fl=Go,޼>ݠEc{9aT+DN=B#'~|@W"vE4iӺa9׎FnEP)h1jHԲh18F xaۍZa&d޴CȢ cAjqӹp,js*JPU3"&=5#lp "vPgcd.*)xb" D%3%CHD @2+JT,ka"B28{Ղ (Av ut*Lw IC#1:F&bR$"M!Tצ[䚮Cm-hBh 3H}<~9xElTMBJGST!8F`"v,}3Iٹ {q|2sf޵_(}ܻ}7#B.|ϔԽD}hnx~^Z&o?!x]k!6ɇ.! ؅kc)Y-U |-–sdy^?w7 `jSGVHf$N3:5lKsC{&-!ʣb,zAc[$P)ݨUD ə8FWkOb_܊#R#2QIsΣ-ڄ;0DyBK:nZn`¬-m M/ag3tԃcІSEqNq,/#z |m`&k٫Yiᦾ| |: 5N/ _*k[;L$)k`ܱ!<6 0sBUv֧pã U TFWV ϔ7$ٳOK eqGhbAk@9%DJX`. |PN>s[й8!n&ۂnSF{Bf\D$aGDoEH|RLyPmbF%DRF ΍fZnfDaM % 9IsDX0:z* l6rAb2E:"26b?4h5 D_Vm !x&@E/D # &R J:3@%SigYӇ~،&wMwۼFvYl$*\Fکtj`ËfxQ2BP(=%m&@o6[k6R~OIgbs9>Ui_Zt CN(cf̨ <zC(˅pI !,( yAsqDti[743!Zo$k.F-S%lt;A|`Ƅ\m6|̅QIJyT*a"&K l 3v02A%"I[1: E-Bu`B ˌ0T9r̪s0\-؄C)1 v!EBPI bo4ehG~6KԦ▻fqzo4'5 +QA$i%Ȁj O($8ЅL6ԺMRR ftvF+59@{l>]sju md&<)YWA)ƀ/|P)5c$cN1:.ѱsNB()!c{>OjUx%SaVR -GHO3lAZu(PD_>,̍ 3a :mw7j hķW?*=$:Σq>dr-$Ȧo/ Kd Ǭ Y 'H{_C~#3-_f\GIlT!rڑ`RB'"<FJEoMrHx !ݝZ̯M)N7<#T`Od73&E~I=Mr.UrUHeў ԘܽxWd06,5zS4=laG#`B'W{~bGUm!vB2\߮ʭcqƁH6FP/3IM4޳Njq˚u\١qKJӂ3sg#%u睏n:*Qx Q=F/y6M>{iE*iώyϬ)Wϧ4sWçeF./.Z~_]-;\^eV>uJ8Lpf0aZkEdHL@D0hGcs+M4nc."8x{o[nԮ4tcĖ@#T#ltڠ[ˠ%/=eJ񃻬no ^m=<}A}DO@D|qx}mjo/Ӂ%s;dǫʛ-V,^<:b\J 1m50O~默S[e 881_XE6(4JA1U~6$ᝉRsl 0h5-xH*X7-z - 8z^穋F`Kcf`5?z!ݯ]SSӿhpu>4Iٝc dD۝c&&%oˁ gD19Cf,ʼn 2]JhƊsqBbTle sp%3rq;T֐&$h]^x~jʺ \x]7~r\G˪79ý_@сe: yb v<&/o.7-߾[uTh(˼xշФɃ_dG^ti*~W*]07"Ywc0-J}7`ԓy3*QWN-h*{%N LY"Zq;2&@[󙧋 KY4_Gy g+f0{ho\j.gbEBQpb&K}\jr\#=q `(Gob}'KXik qݶ϶?(ާPK87 ]=Ya?}|]׬wXR3RK0t+i%ֺ2~z zji饴]뢑y'UZRcC$4Wq(ku\O]-]fqQHJW',λje %:gc^%EJqP-r>49RF&k9n0"kG2ȥF81F' | IFyZFiN3iN3iN3iNYoH&EJ#,-'x+czPd9aqJ-nԪѢSy֦5֕Hb׶@\CB0`Ik4k8n@Lj8YJۼhOm2Ƅ`t*Ҏص)gII%IFza:cPOou w| BV3C8ׄ\룐[ʧyֶ=e qѬِ!&z^6&z7:4yCJXppTck D98}2@1( }T<<}"Lc@#Mtы%*ĢǤʀcp8cQsdΠHOnzHG Wr a?/# 1*EB9!BN1`,+y Fz-4pk9YG2!AJ$ DLA5\h i&!C0.4yFՒWg\444޵֍#etK]bOH;-e_8ϕ%1ȥHdcU?hZVJh2PV+XKD6]e ~,J3~|ŃG?"CVϐ-2U[[d+EƩ Ei)C%VcFA)U;TFJ?s-[aj#Khk:ƱSʕV^?HkDzũ%/ZOM-Y F{>O:<:)*nT]MdT-R5lrS nu5zꌎ[](FK o>k j nFZsUH;+ѰbyŠ /H봗hJ@XQ5ð:A'(*6VN&<(hyr+TjJ:~Q]?I)Z5Vi ¯k5^;+T I"{E79gӬqljޭlJrc誔pM޹[UR֢ ֶw!SRkZ7M-T꺚37&!U%T)ϒj}ƇJJLW})*TAZ3dNPzվ&?h-Z~Fj*Sc@Ҡ5<n a Q6'6a/fkձ&\` 1 (&vb:ND?B; IEUIVWc2/9g1%O6_"c|v3*]pLy/ūեxF"Q'veh!@S4!%kg:\ /h^U!n0Yl۟1}N:0é梶ka_?N (С1"~~Gga 6umeLY+!!BjcVṪ/ 7_..& !529ƽCgW~#tܥw?ㇾkf`~J?CFmǫ>+L}` AiwX?H`McnۛrhMBW9X.ÌTnv.w,2MԒDŞo6~Jh"xպ-@Ԫ URՈVzK`6o[$?j>^@p,DgԸdưuf4uuf{:3ZיrJhH\jduIh (: %{)uf:|"C:R+3{N9%EB yKLR7veSOWQ_ 67IݏfA5Ig A~DZfwfk(Z !v;(U$Pʘ"l$;gF "cٽp0LuvosWW?QU)7(˶.UQ)\kKMIŞo<\Th @Ceֲ!-WlP7-BXWtYJ$q* $*.] 
U,ZW,T$0c5iF时 vES)k-Ϡʭv'H 2JТ(E5Xm] RITPu c+jm &Y1 VԴ՟룂^>2hɂ_ѢTb\F@Rl^\'ٕejh5eEUiSjV-+V/*A,QZ~&qYɻ !︉`!da7Mz.@a06%l<`BMpYtu>$/bY;$Aڙ'kt"xopmPPZ pɼwd &KF5p\;ocD?>_nr4 1\``:F'2s YnnJ*[Xh+e.{ |hoNbA#AaQLπs%0 ~ia`Ըa2 ^wHlokS33~ayb (i)S>ݍU#>'ibVY:U5NֺqlPT?CՅܥE֮BJf!k(S S钵Wyp+O%ZaK.G궩+ǵB6%sһQuUXW[E@10OD]BIKQU8_?0bz,%oXP:[Ds*ҵwŎRB Ǧũz*h,` RaYi8id0x<h/_+:Vgnv*;*UYp 뚲GV;߼?/52os@BĄMΫąXWA'_myOyH`do,J  5Uט0j]x;u>vazL#uc2Qҵ;=xPi",t >OgўRf~UST9E:OMT?| 8 '@mkÙqhi/{ ӏ_"" CetV*&%M2Qd:,)ğۭ/:hځiV/mdQwEajx 1OԎ{fLӻ1j8jaT#SD|C7줈xlv"nTtWuY^rݻh wO7?Ά|OImImۙYmH1!s $Ifv@[%BBzY2*$*s/\;(GbaΥk۰hs-^9ax2[9AA#{vܝ{,#e1}l"cpѴ}sٴ[ 4{{@1C*ݸIuE]Ҽ}6@|C`yp/+VzPЂkO?ca[bp7L,k1hv>eDfW765p۪ǥJ;ֆ@cqZ >V $QGμԫbil7w%CYW v  *V8!qtA'a켻YOJpP,@*a VŞosz(Mu/I֍+ˢJY`ZV'iJ6o<;lA2#v vG>\W uuUK~Rlndc0 УOAJ)ܜ6U%3f<7@S/e]/\vE# 2Ԙ$WOZI eIӮ!=.gc@&; aGU[{r-H olj-A2RjPbz⫻qgI>N1a! ^~!6 ZU W/Kx) ]b]Smk$:i|+Dg{6WGo{!^\7V[Ik X.I[%˱|;KI6e)j)'a s9;;m")ǗB,?dw;0m9 b$U$>R:a,r{ZSs;.5ZqUSbZXlԻ ȷ&~6PPIAL8s(V$Lۀ)(@QBEHs殲xoOXzńКov&6lЈ⺜ٍӣq :$)KJYB0{&0,5Y.⇅fmMZ Τ>;V~Y sE/׸u FZrV"ۦZxwBȅ&;8Yu wOet@fím؜Yź/Z9sWYlv"NݱБ^ "[1&@.GZF.=_/[_ r-XP[?FSrG?m:i:Lۭ~-露kA:+:U8W59xt3#tx>7O1Oge=ZAEC;6[Cƹ I+Bu2u /%$aKnljL>|Ć84L$`d#1 e`u$Q,}ܳ6Zpɻ 4D)ַ@.(nIѸ |Q!Bi@L$h˒䡠ڸY ˄ bV7Y^˶q&!>j\]HK뇃@3e=5Qo(M\(MΥB476gjA|gj mDrD*00u KQ﩯GiV'Q*ŧDVH1k&a2Ñ2&@F %sGژvW>/aOX5a1v,dwBxlzOX*9WUf|D"U ]NA "ny R0yi%VqkDҒ=c%P ff`GjslEU O#B1rvo+DNJ2ثU0gkFiM"|UBJJӳƂY )ͼߜ*US:I=ؔcxKmY/t-U⒒>Z i<'Y'TSr039XtG9LJ$?uNM+ג a,{/uk1O-6WTޣ m!<̬%#M$ v#!+A)sR-KZ$_S)RI*ܬx_~6#cI!1:`з2VF&1B ?q|/com!\ |QQXU ,2qiߞFmWZ*.D2]"eBDHRa4"D]^xT74@ȹ<G"!ech `1|*Zf8UDɀG< I\$<2#mܲ"O~eYME%aϲ^b!&/fnx:)^fXlHB0@S8$QqԅCOj()&&T $*E)LUS"R6ㆬGu,ѵ$7fkRLDmYʊdcX>"ԏJhܽݽ H;TGw(IqlllE56s)DMk8kZ8I^7!5a?w1Yu9ZG\oDW8DVmuA?N}{!aA SvkG]ᘒNo '(DKY"aeA*8HzI4?ƹ + K3@V\ci}Ib=kC5*hBE$FX`w.Ō׸!vv|5O>M, QtӔ0"(*|&o1Ѽ+ATTDhD+_tQc*@"e]xvq/fň/~݅ ).l 1B$J6xȣ^ K-$IFkB@k@]pSI)6j/BT!^QiC|9ވp-@MRp%rb nH<\-Oѩ X+x{c4$]뷩-cYЊ'`ܺ?Wq2uQk Hΰ?z!C,BN Ch',C lJ1 aJK*\2NT9Qdﭣ(sTeng[N*MsjC،/F-ӂ0xVf\Yk;>kuʍWprv o6،J|&ڳAy@&h|w pF8C.=˷) uon2<_%ǹZ?>$ o#Ę-t8'J`bكTp.Ui^u~=3=b M(32@_ -bz6rS1\5cW”'"bD:&LH XR¥lp8@5$yyޕ wW \]8"pvE oWHJ+GK[c #]%Il&H 4I$Lrcb (I$KdqDboEzSs.5J갢7+X+@W^Yzy|׍/ @)WY͒QRBR\[~yYQļT 8\sd!{6y'.uމKwRS-D#BF@a,uBA2Q$4&R q0)!#0g#+6T-BbGA >Jy* s}>|ĥ>qOܖ|zam5L^ pJ'F'lDkc#R*6*$1*>+6TmWl bt]S?>ɏ" gfђu G//6&*7lOㅹrZt0q.VGF'@kSͱ5  a1wF10R3xf%X^~}ût'zq1D g0BP`(1 `c(pzd݅m`yίy4{&1QsG'/;O?y0*Q{0xM3gU ݵWƞ_[77{@?n?ݙvwyUv'}4kw`+w G‹ǚ!B16g}76d'nܥ5M`A=N/3>k<1 3sc}#$W'. 9lcO0c ;o[3rtĪC&Ldq||{~yp@??k`h}_Sf8gZq7vN-No\&_OeC )8&tܓ2Bf[df~ f[|X.O<9ViE -b J͛;eEV6(٦Hھ%DUK4-9nнӝp.n`\DYXƙkX3kT O:,b}LRrYqEDT2TEI'\z,ΓmY]{_D)g-u h %A>mZCkJnx{kƂEpȰ3ſXJA:j͙qNE{UyS$ȐJ*Igr2QjD2Ľ% . :kD-۬Sn0* yB:Qx 1E^|z;AVi26; $1`sUtXdnv ot+7EG+M 1V Q{6<$4B*`&Wd.80C-12C\;dﯶzzllxNQJPGf́zZr5WWdimZRfdp%佩r…őN:Wr]օb8u{Bseq R4haksr5r}Yvq}}p5iY[Op2f}rX:8,ZDMfX<Y'z30K 4 Y0q-`Zny˷O{?82fG _hxs#M\B;}xχ=e YyϼԶDg,|4(^DO{~xf]r @opY<]G i^Eo 8HpȶKK㳟DdOuf Q2v w4ƩGd›@<dŤL/Wa"Ť\tDLJ4IZ Fkwa)H?qA)fS~ kw";Y). 
{ܰ_2zFљaT5@gi5>Ty,Q);^ǎݰ!|O re`yxЭʻzqd"cǚ[ìytKlJh4z"]{{z>iWq2d^Oo)}󻫿z櫄m ,߿ǿX"$:eXM˺26jsI֢2K1fRdpt ަk9 v8FZ_͇Gr/Gk}P6K94l _ r ‘*|e[k/N90tpwWmcSXzo ϟVňFaZhOYdeK6 *Qh襺gw] |%靵^y(x˝->O_&oxg_q\H 77W?Ò: LgEM:{ԼA]-Vfa*+>Ȭ5,ݣ^C Xi/$cB5hA[ @7Mba*|pHv|OݤdΟXphg|j˖!7``ze}bzMeĜ"[v`Okq쾂3Pfjv١fj1C17H̤>\;F9[KY5䬐(b Q`?QSHh ܫLlxVq}N,^8q|o߬9F2!Hlt] B f9 һ,7+}}EcH!aOIC ;$쐰'aW K%,q+4U&(,$v] YL{{ <;|hdY͉:Ad]{x7y 4אM2h"fYe 9LZۜcn!H=c(֡XOI\.bu(֡Xb}<źO6]v=&nU~QolNJ~ K6T31EٰV7SMꞈ7"i=#t쾝dDq5FQ )E-9-6ֵB:$%w;gh١eOI^~ C-;вC>](m{ sQ7f?lв-BZ1V&F: A,C߽Zh (L, t@^ZZ5S%h AHm|B#J% `lasPS=S龢1P:tnˇbѦۯ D=37*rQ6Wd#+L}R|ukt"N`L?k)Po` 1!>(6(}盶Ɉ%P@c"&mzMh6#|lY@v!ڃ C̞T1;(f+eW=LI!IqZL vM:zHU)()N dH%fQhZbNigYLz3sm/{n Gf EB(y"ct6h*_iCfx9%C }sRV\C }38vᖒS+m6ThUiojJk,N@E}g#ҍ fd}wQ/SS\,&ۻ^z}#\**UtaR1)E&(gJ8B`g&}EbՑ`g iqL ;N}_5C%5Mʎ$nwU+=FL֜ -Ⱥw>0QW=׷v/&N=D'E *AQsji\]mb;T+&)_D]|q$JnH E=~N<)aWhW@$c{H z0V83nTuF-)i`N￙0ӕ̸` g\# f&fe1WNy"_(IL  !^ӳWTq͇H@ q*4~|`#t SmJQ )Mڰ-!) >F)%4%, lN6kC[ح vk <QC qo>Ngq"ԄdDȨD!Œs&+2*-_Os? (|4 i6]BytqP=S7އ<ύU4(2Wq - 8QVv+m0?Ep~ ȩސ'Y \Vhڌj b&+Vnm^qkM+2тWJ7V>}CL `Onj޿ G}f G{g\ r,zQ44U,H-hP1( `'VDk'^BfWbicUwolz&ɻj򮚼&WWM~,݊e"/-&#euVdQx1P!0jC/&Y-WX/Z sMomo\g.JJ Ea!E# Y9w;S;a1J1RFcvf>QӉn8ͣ<`D=x덖as Wq)n Il`e>2:<7k@"O{?ώ~k8gyֹFup\929qܽ '_22u3uˆRGvt? xH"֋}.F`I3&ЌY.3 Śhn(h#J-r`AǢz4@aJFIA65>_aYß=DMvEP !*Rꌲ{L#4C!F|(IބtwR®f˽?+2/xϰ u0\w þI{1Ǜ:H˦r#$y75?oI6뀱r\&_Sf.ϛy8y^<`|P@zvUw{u*C8 ?SͯvuyPv0xrg0i#3On%opI)ሢzco0t=#LzwQ c%0+ K/Q0)#kݴ9A%؛0$MlRri\bo!RI(_%kw!'0.iC 9?p)Èp0ZC<k20']6( !*/_+JB:v9(Ӎmƞ%ŵzXAlY%mhx(}t$L*`A#Br"ܥj`fb LrzrTBJ5%0*qkm%.1U\80F`LzT2 ɸKZ%jeXNdP )KY?b;\ A@kORQpOdd 5яRRCvjrN`&2W"U ^ )`Xyn%T]VmzttC›ѼMNqGZo4 oq3޽PR4ԆXL7~A:?G2Bfj# ̏oMύOSW`Yω9[ bD$sҔL`2klkv}tbrt{b`g:h Ք'k$L1ϕ(h}4"xGany֋RQ1"@ǤW~j;>m'MU90F0)j 2C*J2GWOɽ}oV-7S>}ΩIM"ᾏn9WLMZJ{YOԀvw`r4p1f~}jO ]P! erKqy0b|KUo!%J\m1WpKkĖ8+}`5t[}%XMr6^Zk my$vW2} !aA^AsqE$xA=X>,#nW2pscR 5>D :`g6 Bد#JFaߥ#!Zv[L. }`rm!xM-%d% xpa=u6&ŮAAԠ[tuݻݻݻq6{,ߔtFbYҞq,0Hf1jpfEY:AiViAzƭ( )_9j՚躄4A+jqيuc ѓ#X3JJXK!F}tiMUF۽5ˊ6As<>:ۼN/:?ܿ: \2,7p0\Ӌ#٬*m']AMێ 2zI4|hU?_+#+|Dw|tq $l\яGU ӋH;?i{ y#ߧM >#luPie.Tw_..[j4%szKFȚ?wUS:kːw<:n4/ǕPB:=NUDNή. Yh*"j w;⿏{:,~&~Y"@GxGS[^7.T#{@Y?nN뫲aJ{~?xwuV%]~U@xëL=uwS u|uҬuץH;n];e},C뿟yk`4Of7yGy`'Sw@a)9;UrHK3yz`QWLcuj26ZAh2#bn}wP q0[*3a+g gD^к:=Qצ!}&{j]\U ھ4EgE"h}vn p}?t 5dU8n=o]fU]\\v}tS54ݎ:ޔidu}Y*d 'S@VE)vrߘ[խO7@0PXt/ΏZ?oAXZIu\Jٻ8nW\tU= HWdYu+K^Kn⫭45ыgZRt8Nr"|< Apº~xg? V$wC-W{_n7}Bw y[w+MN{/^o@XK}E`+;{|{4,^6!œwͅ$fs#h"<ě jkh4Wa8:sA_skHN?!ƻCXo5{uKus]]eҔ/GQUP@V;hB ?m}+_lT,gtv#7;^G~͝@NK۪^qu8={ yj^ 5y\!y\XN-9`4P>ޒ Sk{?nN¹x{ \E r׃ 8/EW"ޛmmaXQO.f闔JigofΎNk3+?{;ۃm].$7|dUk/f;w~Q~lwdAd%~bS<=84o_[Kzog(Qo. (ޏmXY+ϟmrS_E?e0Б]--~GvE16jv66^;39=rݗxl߇}dͽ7٧ҧd~uhCxEoHoomi_j ?P:`ql8z:x4rGJqkŅcN*-;=yUJ[i݃Bgb*v ?m=ΏO+|SON~;wUdz!z<܇|c>$}H~}H6vvv! 
gi]^xFջ|7I',$MN%m/]l98aH*_1ǃon|#s%`㾛Skr !TFfܪh +Mp> c"xQ3e1-91<339c>|vvIϵU1-lf_\1 11JvQ~ݴ -BFX3Bv1>[@ks&R!a 6T@dB;Ry(Apf {IP&p< }R2܁,5ž>+Y{u8'xd * XK :&Y?=zR[9#g2J Q$@1Mɽ+2Xr$~[́;aU' *+R"shhua+ lVJȬ@Lhs@,q-J+0A9MjB,l1hrXc꫹Թ4#x{ЂQr BX|P&V@y R+0oRkA$+Ijq<)Ht)TBΥu)Fv=\5ASNȖJ[IќL4O\,"SM4r;d֤[?-_t(@@dj  *vDAuFuAr#y/o`ɓ\zUߌ_3-o|=DAQkFEKQe #1;%LҎ`DAS֨Շ/&̍P!GFiw6 ^Rظ(D MB y^ |vQ9eͼVu$LݍC)B0DуѺ: #@cyS@ +WLREc-x4!k"Qr ořror'`%R!#xœLې{ƀdf%kʖ7j;߫"?P a^W3\(TAWA Pϱ l"1G^'sJ pD* k GcW*?̂@6Yܫ(j~8D5j[FL01Œ7DUS^飏V)3RY{p"ZYD4u%z U:Y` `VV:ǁ2c\c{$1ROϱ`c3>L IT+6p9i?<9#+l%ɹ%EKxuJ:[FJe {=$bm-NÿKVqs)tP@BW H1ފVIlA,Ja Iȓ$QLM AHsN rΖQ=;Zn"N_5,.&K*-V('DVyCǮ m7R#1w㲨|N$ GPUIP B St#.Qv *5٫ BJg)ں(g/V75 $&8LMZάa |5䎈c05%[bqyVfy`ZY5 304(pAʣL.fnK#*DPJ{/* 'h EInʟȺRtBw ㊼#kRq )@@\q1I1)I[eGdӅ^(,w"RVXRCd6KFA n~ieKSjH )cwAںbLd0Fn1JjC*#^ݕBz)p|FC*7D뀵wbrT$y&P1ډ,/I)31JpZԡ6jdlq#HFY\p/s{w&Q΢:eESy;x4[n;=o4.Ngn_+ʿ2sJxe$*n6/Y{dџ|=8y.7/[s'4-Z~W;IPR.oYjדMާ szQvG>٬t/\>Z$j@=d7}~/CTMyq8}ׂji6U';NNDmWs~^L^5B~WM3]]\ hN`:k`kQ[mg]pHm89>>2+o\sex8?/O,\3/OǛᇋTFC_E˿uNSr*+ȥϷA_%cu5}˻3ی]]SZo*~hf֔eb3R~֫C:#qbp(J9sl{#G@6 E9ۇn*JˋUPiSX]4 5m~p. e*^4 q5jbo| #Y~zo*Ckk}Xe*44O'_ooo#oaHV TDHgwqM#j9'sP?M-wo5:dmUF<6lѿ ]Yo9+BSOz2bFk U+# Ԯ=.겔s+buYEeST i=S?CGcWJ4է\Mv􏋿oY]cnp,|d-d\\˷YQucƺo2zrʩ+ }V5TJՊʪn8 5ntㅛvSd@57y;g3oz..M0v=qN7eYbn-)/?<Y@F8:`w8'& n[+%V0x~zJJQ00aLBkЍnT׃dT5|;eWn_xuڇCXN9*^otdeaeI4>e@tcx:sO1%stzE <3P `eUQV]4%ɬcCpɖ>Y><9L)権0/2U☝<*8 f$?=i+VfSӗ.JG7@o#K,BPN7L4b*Ll]1Sm|Z^-]*Y͕*J.B,o3J7S7JrE]24${6=.oBѵ"0,Pxe\ԦwCHk5u- #pxõ٬r[~en=e^A9dcfPEn7e *hp}I*:=jujRv)[kV5HVxlY(*ڗ+`;7րS#~NٶC\(ldV6@2eL %VY"-gm͒"%?Cc9Q ɓ<~ND=7ɮST~~>yF[{ZarIw6B:-ln!b#0ʻ"Y\XchE%5m3`F2GLo).fWŸbT]%K'WPc@)U.#pW0FL-"ztrRU 9ANBqq:B(PZbU1ǏV#3Aj ] h9C&Q#a8%y\).Pɸϕ+ ^<֓k^7Y )[ 'd0*ԥ腐3/!@&\sݎY蠌F:ΌսI=y`86oO+__&MA.6~p:/W7emVԫuؽԞG^QWnT{>"נVy&De<oÃ]u}|z1ts%?P]ԛ/͚/uk_/߼)RfnJ+བྷo{rϨgCߛ쏞[53# b)GJO KO_>7P&D).01UOJ|A/a2xOR[s[ 0RGU*˺&rr&2-AVeeH)%8W贰me_4$0@j,xTYYJpnJ}_ , FnsC+Gϋ֬WC%~\^cVh5bULgd܋gϋgϞbauUG"0sRz0>;0/Bp#)Fi`6?+*P1ߍ];qBHչ4D& #]QI"oP55)И&Y2ʺ:3˭p/oSITL\1zTdFI9*fe($)*ECE͚=8pJe>J~3|+\RDz]c.-tsye{Z_H,!P٥i"gZQ N*՚7,{-7E2Sdۢ]R%`!3XeR5B-l<~/-χB>^^c4pgE1q 5bf٫}D/>~:KY`pT!Oo^¥,~ׂ7jR+Lt)S+لϘS˓x G '1})f$sߛB&פh*S)AE4Y*UU2(6XF;M3΁;:>}6͍(jY ȡДV $V#`:y]VЀJk3unb5sHQh?_+w[|վ-nFFT  +^TF *&YeUM↫B+VRF!C4z;C{na}WLs=w Ю}>E)rN+w\OeFPN+ h^!R+π0u*fduJ*i7Р];t7Zm&qX(#'Y& 8޽40|2`Kk&4R_6֫r][ap]>i&c"k!JN}oC{*"7(w$pݭIzP2հȸ(5gVLTTv_gkKw~Bj Y0\r)bg&O-8Sd$ Yݓxti"{PmSg/r Eŕ㨲nu΅)qRVZHQKR$dOjbg~qL9E?WU/g\13w}oB{ބdDSƪ(o~֏hn& l5H# {cp?ii``E}tWWVHo#o.Zѳ7Zk<#d)=R2QgF2)d%^*vkZ>  -#t2se ^d-x<=W)/6:֘!';$w50>bmŎQ5É#s=X}ǟG~>fvmX/wj'5^X54 EmIIaS[禾]03NOFm}}zu'|kԣt^-nqwƲ o(Rquw BLX UG##tx ]~@臁d Oa[(p)z)B$z;OޣW\r22XnK#)?oY00[Khd=GXylڑKVt-L[߽<kC4@|iYe_#^5!d|WU-fA3I*0J1&^T#fo5~ΜU.T&Fd y2P+g/O}#ղ|a+Ɩ9. 
?'dS 5 u cJm=[">D < c6}*_s& :Ր|7^N$f}^t1;nO/6N(0G??0pt"/Oݛ Ϧdm*/::֘1=' 0HhIw nD\ēEHCIwE}ptas=δ>jd4pc7<]:WpDZӍi6PuZXXtWM2CW&0(BOKaۉmAtڷ\Hh>ҰH.Rr9FߐFxug ?cDaԓK}k5 (FcNS,p{XBl?Z]eH1j~.%rEaД>rTj=&zK53j5bcQPU}ܘKZ٘sVU*~Q<ܪA!5adMmP AY d.,<S Q48)nq4tb@ aJ6Y"%"ΞIga I[ciu̢loZL#g=k`4 YS' hy%~)xFZa77}kucK3zo]ys7*SW&pܧZZmR5`4\2w !up$4@a32}U3 ]x6ew~(SV" ^߶6(@;sX{~gzMĦMs1Bf^7Ͽ>>x9?,ϝ-ypx]|>xzw/[ ?z,FW_v:O p3n^3A*v?U2` ׽VӾ잎@Q~]0~>;} V0: 57X_ |򍃵u1FI V`˅-:ڽ`SDZӤYiSv>5E(}BQq38(ۃ<1s'f}΂ ڣ|E*5Y"?wr&upzBN/qyq?ˢ&KŒl^E d5WIGҽءhR`fKv{.&2hoa'CmZ4F:CW x @hyLvs$ʼș}n]ϛJ,, bbb.@y?=w (?0 Y W_-k=Ha787(Z>EE3"(a痈nt /]fΥ/'$ %Xf2oäneMwo܁n捠i+s#jFȺiY¯l,;L8.@kX[;0k%6MIA-^3~ytt_F("L(ogu:8Ts83&1+3YR+b.A Iײr|bq(T~@g<d"D3y(Vt ߈(pMQ}Xv-^q} ٱ3@s{{@G!5 }Bw~#Ds2;~y$}sP*Ւ۪]z]j'7'2Jecu[5_1ԕ!f^`|Ļ!/i EѾ9)*<=`9v2$M%N1=,Ks[x9r!D.x '(as I x͹fYnD.%!ԯm_seg!{|;s8^ۛV0xhHFB{w9G^F'DoAQ*F{8pݽI- HҽAy˞/~ ap)w:;;yJ Щhy\e EaA/Vߞv9pԟEѿMvV"p,; gC7KU|Rq/_# ?0¸\קqO)>֟'2M,$bR+l,-wZ\>'Mأ|>2Q?TWA5pFC.nv|[-?nyy-o!-7' 2; ŚZ#⸢E>m:\ !r!(&c& ܺ| ͣ<4la~-n@h Z3e ⯸kgI p%qo9}!dsW@܇d(ޜҮK)FMF,,nvWfMFo2cb%\#,"nBBγJ:׹熖7LN&8nC ,1nx27퇓R 9<s* s,a·~DPغ~v֚!L2Z55q%܏!׏'~8 AȂEJ"C` Vz s!,D0"*,\Afe Eenl%Rx8×g9B 澄7G6}d#9f#,0EX ..>A7yBSxef_==qiW [KP|*AC"!+H`"q3"ub&,V J19tEesQV6KmG=$r(+5$p\=6{7xJa)|Q .-Z` AJl!r-\rZ(cGtYb?Jj^?&>->-تkqc &.XNJ2_LT H$P\;K5g ]ctq_aAm:wsW2EnCGD0'sNT.wǔ0 ap`Q 3-}\~aKG \4}Џӣ A,T*=?$= IFXxyRSh;!x9P8(a{X 1R!S 0E~W}L;2ŵXV މc܃|ntJiŅת BhSna׭sݛ*ebM1DK]9z>TUb+5L?C,ś•. X6 3iZ'9UQJl~]7[W1=Gr9]~ԏ,SǷYҰy wg.?n㳖|8 Ux:pds4i77b˳I°L;Q TU$LN Sn8ѥ4n ^kU!cFUf7~tL]reHJ,{EǺ2,3Zy)XCo#W(tAWfMȍ2@΀ {>})Iե7hQh3 \kn |О*R͍Fr r\kU 〷3L Iu:P`5ǩ{pK-!sG˽ܻS1 s7=E< Lķ~?PжO_c6>ʥ Ks>"Wyj oCz[ x҄ F8l)-rߝ2|6+r[޵l%ʇjwy?"KٶRE^]VoCy''@g~g|ޞyFyPڧ)}ʺe$OXItMTΚ-ec&X@K(:^˰0gˠͱr.yxKIk6(LC,gwԱƽsѯcѰ&ךyۄ5@' [;2+|U9o2EeFڞ|a˦@Xqa/'q`YEQȕqȃF4JI{.%U1LvgPlB / B:m#*귡5,-.4g^D%\i' .m~yչ$S(8OQJ2#'= Jr6U]kO\5uoE^ >+K q&x%}YnH-xDqӹj IޛE4>sf[,sˈΓ{j3bg<492½-?w]f.s= &wsDYo&z#׉䍨#߇q>.i,pil#2[%s43sĮJ){֥VݹQpݪK^?R`sL,D4 &0J, Q"U(%_*ShwҺeuGw;rRޒ+TO; h2Bܞ׮yOl~m"BqU|_q',kQ/| pk$R[ =kUxG^γJI#2W~*3\5^ܺi”+4+2Ws ᾌ=8'/^Rr{SUncKcNV?â(KIq[qu{<3ɜ.'k)?}\s<91j:!8w] KIHv_Խhatʓ^G4~ue@wW2o^7kU^E:zN\7B 91F/~ݷpGvK4ږ#v+Fd0{w ԄMKLgQ ra˳\ aqYi {+U|!EMYαdr{0I_V Z]TD,!m,a Z!TuEŏtz[ۭ <(i?i+ 0q&2idBeZS e`M2" wɉ|~/-NJQOz'^yV B@9r>DJ AxX;mۇcMץޠ<)t!%ynat Ka{q9a!o6RJbqhXpPh42ĘzI gqh41AJ1E !S?aZsNe.d[!;%D¼;EKJ\Û/WۘCsJL' Y h]è8EAoμY|{hadC@-T8-vB ;($ mSR<JBx޷BԼI[&q4GwJfUNv2gϘR;'s}o@@\dUῃ$N'!IRyw  ([ruI0=\Z/M~zn?v3s#ZRūċi;I2"!zLZtY߸3cQ&!<{GcȘjRH!;^7}{jl,8Gǹ'֮CMobs|К>q.ͧO'2rwɇсNպA57-j?=ZӨ h|o ;p8N<^9O vaj:,SY7>ڻNNAڋG {N] N83MN\"| jZuٗ+sm^_{7~ЭW &,l]Ϳ\-φn}1{|\ \}AIݙl3.MoYx0$ct Z)% T*ϻ15&/"ywS<ĈMI|gFh=M1ZAf~#vEzȒ~l" <+HiO>fmPwX lDJEa ",8|;e2-ȢXF ŹI\^p0QDƆ$ pd)L);h#2BB0;B { At\ Yt>VԲ="|8 XbM  FD 0MH0:2LE!L'!Đ0Zkm1MbE$(F,K@L@3Ⲯ(28! K"Ä,Y TG!' 
h0{=C^zRSpCzr)x ۈxe hi%D| 'e'೅7QL6Ab+c3iCM$" cT"0qOʠub5;% PV"!-12J"fC4[Z^]\^drn@+CSnKEgvhiB _H†H!nmڄ(K$AڔaɞRaVko &3]1%&F6,ag|0] ")"0fAawyP͓D6%3$@s榎:a XIi"l"1f(OMS ӒNpau ֵQwFW2ԔT 78&ǯF \yj`df\Jٰ$w\TӕNh*BMEF08&QUyO@T2X(M PE }-&1WO  ҝ])ॖcIv2ݾlQ$x|b5g|^]msF+,ڭ#=/3ʇںnjs~K5غȒVm@HKlOtfتތCޔVR+8)GpMJ3M H^]ƋNsݵ=U4]֌Fqf\hNÞV:OEэV't:Q?"oǛVf#b& xlvw6]Ա[o(`v^`>/s'+߾{שJJSr?AxUQjIyҒ;Zڙ@aPIy|Ӑ..zzJ T[ISh>ajD鰌rwtRH!L}!ve EhHC!J~Feȼ m l9(B%Z(coY F!yB(/k'|dH#D%sۺ dR9tDłn&ѵb>JCra"PHr.vupZ")#ma$Y֟V+hL`}"l$ lHz'칾 s7,P{C#J 6 -E~a xid`  ٌ56*C]he=H1g D .*2al\} h3e崂kp˱}<4_.?%1Uqh'3v.O~'>a7Nq]B('DɛzqD8& I]D)mgsU /NR>Q $U@I)ʎG3ԺPE4xL̿ʽN-0\4m(4VŤJ +,8gX~,Q7cEQVig8.C<=&(B(-X[q27_ҌeEB!Bʕ6YU/ 8Y%{goБR,!Ǝ;JұI M:WNA Fip^s$$;Vo"BSPJ~-.y%hRB#Je*8OWʒ[x?)%%ciEއ|A̼tc;#zGܑ1 ϯ׎O޼~ų-F|#E/hDb̙6VTI'|]P@2R§D_-pKS(9Ake ɫ2[(bQN#P ) T [(LVGQ1m?AS#ne ]hAc=Z;%6^z@FZ仂\qmbF(e Č0 y8ʗ2I Cΐf Az5RK3"?d['  Kv+nEg@Yك  m#ޅBG: z( )"7VȾ~ޱ1]CosN y(r<,OIݛz 8$d}윥̌`YxܰJUb7,vff*[ݤE(݌83M a=V1-7lV6B:@V 3s ަ:K$!l2)?g6XYuYl- {` 9v&]X;@j`9M.C{2ĎX`&K 9lXw6^+{V.emb{aoVjc3.'Gda6&H'6&q7qqc~I;EM#$ķ˲pC+ɂUQu]w6]3o^Һǐih; 33N4Rr {x\^nf`A堤f%)Llvw6]ӷ 7yaw;|HۢK`w.ixt:4xs;iX]3&qg ^4Ƌr$=jF .~nBsmVB6c83F m6CKvLv&0QT6|rypA̛ۛGLzkyt6;x. l;19ԩ*~**i7qNxuZ><5w(;6/?w%Wվ{RF0֎yV o#`eFoo=AR LucTrg@;K}n+rcU'@k~ * aywesɨ<,u 5%-.n?&\c_>_>|W>Y\?O Fs;4ZO/5ڮqVnхOQNkDJ#itg |UcJСd$NIJ:z2%ELI ajI<^G==ŝT.W"-c.'WWomޥ!}K?  ᭣qب1՘OF}|% = NIFc!! $YGmHVkTƉ5b,VTq.2 iF[w+nmQZ?x;m7.t=JYQ|21 H%!2l.%oJbdoB*A'YFa-I5>_}MRlHk.RXaU;-U[w>VDQݠ' KE.)D,̝;JEye&K*a <+zNtrTEBt:vEfFTb Dȋ+TبHA(If䟃ZWуao/ ]hY7bCCE_#Ukvr̰ǓBKѭxWpla[p;֘-3Jݺ W%BXlE ) AhTUj0dӳa6| dmߏglHQ+kMW˿题o'TRIkN~]j2ËUz( ˛ϓ%$c~8x$_eCvÂ01Mj9tE\^-I{c7zXB{,$&_<^KΏ會uJRx{b/]a1X>?c#ӼE<hk6L8/o ekx]{kuRYȳ!}'oγ?VS{63|?PMUqS:p=zWx*'4:p,h 9 fo~kC#4hA˓IҵMg-b4jG56"ɻ6m掸_˂E_nx5T[L 1}W!95\⨬V7+2.r~ʏӛ/׽B#ۊJ;F%Y1 8bކy/+PvmCDn߿vwξAXx`V,?]K&_T-`QhTSIN-!oMr\Zͥ+jq+h:"o`}viFTt'7׀פ_Ns;:b'{ޠkVK_j?ZÚ ;]pߦv~wᣇ\i~sK'G 4e8qm1GIwCOypyDH _G7?;I7e/y#[3įmqO|7InK`/W80:X6dYGX ʍ^_u h$o+ k3ȋpC[3%ژK]3E5@Ţ\O6KlEi/ͳ `܊⚖esrJ_mk۴%%'( !IEA&Iy FPGl4Nwe&0DfRwmm~ rT߻ a s56هX"e'Y?$% obgJ8욯B$+r2qZ ц3)tD7 cHwEs ?]ڀY6L&%u@5q2dH#N8xzeeyCQXЪ>Z!2 QqyOs(QD98$+}N"Mg%4 -қ+vMgx̪ͥqcUjV(qaj4C=1OӢzrȭzw7j0&n׷o_}x~X\~!VkǶoVA܀'LhGO,;̢_JnN׿gxTlr]C]]Oo}dA'F|` pVκ$0no^8XZ9!g^5'Bia ̿9~v$a&oLa&wQع+P-VףA5yz o/Na|`!CNnyqE ƑV{ b:Fwˇ 2%Ӏ Lէk[VxSU ﲇ~@#E!2ƣ靈gOշ}? ?ً[|G'od6plcLnAJ30&MA.l[+>B>a8Z5jromjB\ kʇ*=E[x(^Jx-zJl:˶;JZ -q%ɆM.9nqVAd&btːkf'n~j8r(?V.1,,Ÿ Ԅo'\h?I`:4roWĔQS'-4$eeEb$j 1+w!Eg9ĻWYwsej;؅Mg4 n_N^8v997mCZ6#^ -GV98xgNƕddGV?m0`iqoE5Wb}`6@4#@d]2(aQ9FŲmj#93U6U6U6U\nL[ٍCd)Oݖ&:{ag>V^ܒ"NkOJ)% 5  i12`|FA @1RqI#E*0'~qm&T R `;QG#Jbze[?ƹw6kXvn'0otkQzGκ=4{i"k{pvgSnAdHD'6KwoiA?+?{D/;KO4΋2Se1+=@  cjh]3KAY㈌*ie@!$ 2hvQIXrRRnB> ZPv;p$eUC=u!(⇾?d m7lvm^XM9QvYI͵s~)۔ e|K;o\G#2;4#:7ݿ-FKӺN.dI&9dI&lz,LYEuTQhns5E[YSxq kw9oڷ޳BG4Y|+2KWt8 ߒZ(tL0P[z?ysLlo٭V?|H;rL Ӕ3 j&˜}5r"k& 6l:Mwr(hMM8 Wɸa1ϧfK. ԓ_ۆLiﲂ %+VbemQ+='xU)jP|س|]dѢ6_$ңt9 <\~&K>&Αzu`d$޹r9TP=Pꛇ.W-\V[ם[ܥE*#TJ(\4HN0o.ꌿieBך9!?ugc w'T,)&(m/^k,?c@s'1WevUh=U΀nw T~r<6ꖿ%TE4)#MnٛS󖿹y>mwؽjLM4TxpPSeZZme?3cǒ0PM),ޙ;"^X`M߄MtO(%'Rw=e *@ۗҔ+B3` +$D_?a`^!\ N'Y'Y'Y'ui_A<1N&qDjIXi BCuxjOB..~(ɋ=R4)!SH4%ʙ D A4Lb7p0{e^`[Apz&|pw|_6 }=u;|L? *g|d8װ_2FD!pp98E8_$zkbWzͮMHl(&d3g[a#(jw}a}e59<ݍ4!QÄN7׊tS immBu.#;@_׿6kBjۮo>&MM }.'RAT/G!H`OoWШ192NO`wӒu<^9"vc(5t{G+X4IG *fQR:"k^cEtF2?LdF<υu9#T0-\G`W;F=Iu~Y/x?pըm6/@/}P0LϜz'G%j{6Q3,K*negEhV*`oP}:њ3* wtAO~I5/!XeI mHT,Jj9 2wzq?aU]K@G?"vK4t:R8*ht?U[Bzy5N B,n<ģu&<ܵS6$)p) S"@!:BU֧R kl1xϐ>ƭՓ̳ŝY 玚φifx_#l>䗃B鞣]/fC(n@ =qHsy/cK^i9ӗ]tϙ<[ݳGGr `P "тkΟ ,Iv"z>wE\\5E\\5q&LR@SP9eS.> MM41$K|PgۅZmx ȳW$we\&?e\&?ok$"i'qyT:0q楂LrvrUlYݴ|KPEٱ>; cv {M$h> uzfw hVߠ\t G{l-~_ofѯش5΅mW(=xlM ƣ靈կO}? 
?ً[| ȾY؅9OxtQ~;gz<ޱ ݖ@(ͮ+c4}5[C Z] 3yG_imC\]>8+&~[_$JI<N\QVQm"W҄T!rj ( G9$) DPx6C4=}6*."Ś `d/@&d/@& zt KEa+`R q>بGo99A4KNj%((.]Ut'K43pp)V.>ke+ 8A[ǃtb 3sFI]`JFr[mBWRmV#(kb39!?R(>N HLQȄrg9ZA=adJP˞U€h MX*@k-y|9A&щ(9r&5bGh JF6q+I+]gH P$(h-Gv,qʠ AqpJ ܎:C[v ȶ g{HACu5 e!r,Qgzܺ_%JfNc{ٗdi -c`u>Z<:I^9AnQ*UE+N:P|о rhz)7e==^ Kt,<^~p8Yʇ-Z X%[R#HxGMFZiCbC30V<Aq_m7 Y}Lp20,r/ߗ)`ڛmNʍ&qx?A7dM`)1Nȶ}vjYW?_uP׼Jmaf+W9?^o:Eˌh,\cTEL]=/Q-iIfUvt!U.1}/詻ȤR:H1JuynУBFQzy yi(lZQF:A`M5Rc0\%:É!v( 6MJPߋ=:\pqgYσcr8(t$ά R66Ks<ӣAuPp;XI Ku10%t@T_6(od8K;;_YJ8i!\#?2@sI)}E\Y/0U>`á?%FP<ŜP@.%nGߏs F+*78`xm;zƱ⡀L1=цlPj6ߏ/ U(ұfcC j`$;eT(Y%sƔ/:GuMNJ .|i`Jp{Iޚ$>1"7ՖTjP+ 2nJҕfFQS'zF`xkm`IF4Ddf=2)nM[h3ϳOh87O3 ?ˋ7_{x: fsyNOHsg'oKCxJ64HjF^k ~{49mCmpȄ nX)pfq:£oxaMR+7ҏ]_];@#ݤݨdkߨ[]p} ưUUֽ+ZnTШDrr '{/,TԄD/fCGŕ4˳6VnZקsꕷmY 4n[LWkjVōƽk9sTpӊʙ:2yMΫu9E?>].pH1=|6g8f>K f>%-twv|c|uvl_ewnUMG'yOc2>ޕ 2-(/˷_vLQ;):$I+oe1+47bߎeLǔQ;ntu)- ZE[K=q?urMTN/{q2䇇h193ahUCf;]h9nTG\!KWs秭&ALwt:W'FȞH[] mQ~Q7DRS+RE SY8癢IGIo(W~&c|GB2:B {W|B.bK;qjJYp'[j1:Q Zߌua_yHн R qyTtIT%}㡢[zӿNu˞~NRMOTG IB,"3pe'T.]5kMYqI7DK׋e,p~z퉣$Pmq2pR06:qX`(|>W q74F;''~Bڦ, Z)xQޭu.#Pd[Hq6Ȍj*(vTfr*13 Rb5YmSB2R)ɪ!5iǐ(ƶu .6K- 9%[ʢZL^\ ڪͅV 7E<0Iފ.je Y)dH75:ۨcG-uފaTxM6 }X BVǚ& 4 5LTA Kc#lb+cM+N-v[rl`AP`XP5&nłs=YUSjgrr5"hz9UVk4ST\ϫ^zȵ:,_)Pz|@s-,P~ f%05-s]]CݧZ,P]=oݻ2hk:_9h}g; )hk׵;pLb& Nvޤ-JH3w`]lNhEY_͟Tru-X9(y͛9 ׊OuNٕxFٕjrqG|~%f:FKCtLcDdz~q?[ޱ<%7\3%жnG{{ /yX='^PD}lu~,2ݒUh=5VaBYZiˮޱCvs޺X-FO(ҟFw5:o(ktJF{w{_. &x 47`8Fȵ6~;Bb@kƥSp>>y;+M)8␃i݊N!Ѣ$^.T$EϜ[at~Yv4oݻ2?H~#R'<_t^|nܻc@D) c DP9!bLBSuUld$ $t[|it<Gʬ/ _ TX*AQ"#R 8QYϴ) '(h {pN[PT[ǃtjOLpBϨ#( cҘNqt>M/+埫?| IRܣn9if2!bOb?cjt_/PC^k3UG&N`u\ѠO0 p&Rajt{oL`NE@*RX+F fK-$A8?9SJ  ^)|ptFrj)43zAf0Oh#EX  c&I%8d "yo"` j ga"K,8keH^{+RC%52 0MO0G91,:.TcF Ў.VgQŽ!l IAd7'Xap& SuPF0xDыњ!:0A 3@Yt}.0rh;D͐5#TjNSD[7c_I*0ODX`Bi $F @Մ!:LN VӮ@c䚘!Ӡ>.ȡQ O4K;!8d F*"XMWzmtr[2\Mf^Nudp㷿(\NS{z:F8i2nF_i~btfO?X~钩cA<7W .Ɋ1]JgE|ő:$0\Pj#?{w>L3O:n|GeK C@o.PbDƵRHz1OykQÄa `?>R*OןGT&՘%UfPhe+By|]<^'zdڐ\mȻ|U١Muҡ6U"kuR=uUt7ud\]5TA;T.dX4|w$.LQB5TԁR1g *ʫFׇJTtTtָ$sz*:T]y+k ^ 8޾Ύ4ۑvd~)9v\N)16ѸcYAKWF D\{gKL&(SsO}tP.!Hyb!R܇C#4m"2Tt((H 7YiFqmGikNC(p3Ll"}l~4*n ڳӮl!:E%_sL]&Y E! P "@>\yOjh#r*}0Jԥ+$.?N/uOA$qq@_ⵅwTG bOJ+꼈WIԵk Z\C RI|br-8y7#Z^j|-{KO=ue0ghQbB1o8y%7}Z' Ja"I#>?Nf9Ɉ5?VʝC,ؙ-~+`.9{tjwi#\)WWʄ6Cֈ,z_]tٹdz8oЃ=)|(muzM(ZڈkQg_u+yeA?ȻGq3 _dF8.÷2<1UxSB8uvra$$o(hJJ7PDb(DbxTqEK tFC( &j +͈] xT//2_Pnа&Ek8aڻ\*VX-8\09YDÜ:k|)Q]7%:-曂k8ߡ#%ȗ#J8E @_rNaeV,p Ps.aˈNЭz(SBƦqc[f*MN/$ \ѭ&GfR3uHarlDUW:imrr$DXNnFFRqT5lNs(eUksV> $g4aԡ."VY(]P1iF;|=\-;Gy*ŷd0ߤ&G:4X2=^MNnurn ~|05jK)+FN@.;k4+9wVi[oWÌ 3 Ӝ?mk+F~ؾ/}n(-Em $*خeߡ1eI+_dyC\Fv™TL*K̤ʓBccaX|QRa ķ7̖#)s?/ͥ]ۓ쥳pfy쾓-'g.Jlp02sV6__cw6]wɧx $mV^gk0q$WN.u>IGAhzٷa<oG y2Ꜿs_9}ѿO韬K?^zwo߽yWӛx4Aw:wcg.~]w/~}g>}޹?0v=vڙzc?M4үdU:%6I]gg|u䇹ٹku֛5NTs}ɱ.~mWs;!?gggI]1x49 7O;o%d]9 \]Krc@K n SM:}sg:ϥ{]"/Wӹ.y蓝.rp{(l7-9/"~E5[OKWIl/a/Ύ~OpL9/sOg-d$y<Qmj]T8s:,} %fo>`pzxzz܀^(ū?bIMO_a< Ɲd@r}tK:|"|yvFpvs0ɾ-GiQ1(s,CkM=[Dُr*OG׃OߎNlz__|7}W0dS|Wy"Wppsr1GƳj߳;ָb-벎LVO+gV)Q]biV(>)OInT04+|`2QBPVtTk?  deBnvU~h5.l ,nyo6̵i?ϳ\7._cg׽e_\V~(;jT>) M3Zi1_Ke;pțP/yy@0yyKCySd-kwN׬}H4M9KvwOt7djF6ݓvM`n"?ftGpp4iIt6WdMuqVcphPm.^, BBXLS{V)cF@P$l tBvǍ !p=2BV)<@qa¹%FL@JiC+8ԊWP?' 
:szc7v.uާUD<+/;n;6?E ӽl5DO޿_ Ձ=ӚWRq2"ł(cM9Q6F.=LBCUCrSD0bQGvIj3aY&IySwxDWj87Y_ag.ǃ_TvZ?^i:evݔuSfMYTuvtqoSTW:1雠gݨ*9%}LP܈50+#&Q@o%~⧋_%& ; 'KVg=k\ɰ5 nҟ^؊QY_8-óy>m{{UҒB*%^VREHa2PVmK& cÚ3 \FrpuT*K .QPFӽzr)izGS ̂b]pi-\/TU0?VQ{Z]۞ #Jβ@< W ۖp8)3=ɭ8a 0ˎ+` qzk`,^;@7Hw+ .4Sv⯠p\% [mC%&$>g|/QrYdD?Lx9|{!(mc6X_f*;vɮ RBzkDGele4B:Q5~~ +6ٔ};z*P=;D)aMkt9Vum^\lƎr"X,by"ā!@VJ h 6%qL#Ѿ{򫍰>_NݲZFJ;ld8oaBndOqVL3DJߣa5ִGC8f?9^jMtl1xvZ7S?og =fGr<Ժ5{k>8PFfk3]˖zk[q\!CM1刔K*7l}IbToMYcB?(ϯ9Ũ) 1uZv{] xZV߳?dG\[J[@J W3I/<8E$Q;͎2'7r`RUeg>EԵQp6>֒NBV&?o9WzG[ wI`1DMāIhIGCW8QPvpIB5ri?%ڈ-Ȱ5V֊0jv2 ԲZriub/IY30NjΝ>}"\caA[1 3\@a+K4\N3 O3+g_/Gd_ҋ܃ + -~9&NV?Ysɬa9{s *L|.@W]ΩуޡD/@15JH(еvͤJAA݈Pvڢ2R4.AHp)ʬd怭]r,nc 1m jo S`ae2R`ŧԾGt3͟{D/fִ@Q7_Ȇ*ʿk Y h8ಊA:2؏A 0WVH1%lR=U9 } )l$"WmdԦ*4$Hx4@OAMJ@=ZPcW'&\*Ƒmb-Q#K) 44yTa#N 7C?&5m#iB۟^nunqW? lإBP["c!0YD=T" aᇡP%j[@cL!DYןɰ5 n5%+iONo輗Ӧϝx;9㞝0KRY }{9Ypjѝ LZx1qε˜0 yX#.U#mxH5JqrGB0*An:xIR$c:PS `sp$!D2*K;HJO$Θ=X2dTAbHSJkmFEbW2~2N8 dN6=ly%y&?ŖlEݭ׳3&X,UfP QV[N`د w*LRIqY`]9d$Q) BS#tت˜r4lyH ؚ`X|`F,쓹*2 ?wNY lƊ X0-҉" 9x-kL l/jH#G}EkO_ak7_,Naky5O=@],L&0pw}9\ hon AcNl(f>XP I.7feR/6st_}bՈ ưQԔ%\b+50$VNiITi!H߂2D.;Ti 35eRMI1*J'^h]i+| Ua,g9A~͔.mz+C<hFo"F&K)7FsqnJN]`VR\dA{cN8nא#:/˗`]{ ovuߊ[/ϒo>#t 2ՅZ9e$1hVXrfnX ?<,|ήhqsѷB9_ƃmwç=0 6X=I>@D ywFS[SԸ*Nn&֌<ַ|>w%FPiQQwox3jFJr!j]­CU28'0Z[@H.h3XMgr^g3ty66ԼO.ϸS|#M_MaZ-Ƙ<0oJM_XMT_!ɣKֽ: ,Ms̲EZje= AنoM٣pԈFD)V%͈FZ酝r ։A5#HA7n6)rM#$0B\/,mJRӷ"`deGkciw+ٱ %׏şTǺ&z cݣ> kw~&A!5A;A:^ ZwNf}7:A gTbYo!I K;  O^"! H%=KwxӉlP#V]aɣK⽲.4L'SNBSt1ḳm ۇ%kмۺ*/}Z!љlFXIKCN6G5b|Y<\C\=x<ÚBxi+C;J7D~Ɲ(Sx0xMaP~S(te}FS0q)U] vZwNf}7rG1*5 `FW;װ8 RZuFv3jݹSߍMN]gxpv'WT.?6%/#tBy9q*$7./xZR^vDBX,($xB%0JIyR*x3MWÑbڪ'.epؐ+"FC@īt8) |RWk Cɲ/:/lc(ahʎ2):V(ͤf!^4rp 3Z~RsUP;] &5Am&DZ.)5 ayOJMJաY%GR~RVbgXxs󑟵wE\r(Z*Ieh69d7K$l֤oC5 =GU1 UJ|J|́K_LJ0I`6K~YַlqE)7b9j|7r|X\f2}ay~떰Y.//.0PRI.n.o~^~٢x{-[<,r*f(_/T }# }9<\} ~ y [i攎`גuf 6*xe=[aם-ĚaԤp9#R9y5>j7BPқ5&簘JXIar}6fj cVM%'j1fA'+mjr,7"1_2p$U6pos;O@(ɧ[P '.Er "sso61(Mt"Q'P ]gtEo.JITCMYu*CJv^i%:Pj(, PY-B(u.h(׻CQ1qmiTc#yW9pLI90J|fzeDknlalGze*Acb 2֛Z 8yT'VKX8^uA/7=;~~en1"q]_|R]rg1 3Mڑ(#B\7<%}JLȋ0Y!Iq#iy Ga~~:5m7# ̉Bhy9t;9D%;jV`7LCϞ igۆ zd`С{U-3ĎbD+#oVW o&P:o!8C-f)p/ b{Z>|-w7n;$O~|@1c|1^(ƛWBtQHb-9PAFh/;22P:7GWaZp)-ڧTA/0#Sc_Ahl]oK 3MϽvL9ťz,)&,YEA.(S<".#Be9FaasjSψ|d3ogR,uY+aW 4{w(;|GBwjNX"Ao꜌NQ `~(#0d4$Ѻ!y// }DDžFz5WD:} b_ſqݔ[ Yw9WcoDs72QF0 {?s9kr0Z)lW3l" .J@`&G:u^T} H?UG$ ضݠ~S;nZvK3x=a7# #[UPT[up~^RQMHTtW8rS^w#*D4hQ y&~ayZRMŋz+SZNw"Fny}w J581W{4NԆDmUT\hgk^dIXb)6d,stZ@Zh㑀Yqc$QV+Y j[C7~X&oCFc+dZ]^9%ΨZ8B uPVaBeJ$44Ln.$_l>sŢO'7ve~Mq/>^GP p\ן|}y"y1MhAf-!;۠MqzƵj׸DZHxjJĐbcHN1^AP? KPyX_OSW_Cv=܇}G?^:~w*Eh0vNgZ}2^ FmPƝ$YxHI a<}>=Qp61,YX1G@e\Pª'͉ Hߛ{g{s:! PX©/4a  iN9K;d?(985?k\Og<\!ک魆w0@q9O?Grc/\ej )|cRMT]I5O:f5btYFN>8h>4ytc@o,yGTPjy2 z<,0憳' 1WD6qO0°=y5v z)﵏ ){ص -O~BPBL\#)@׹( |H W@>!m;Z?]M5lg !\繄Fi:i!;_#&܈ٵxZQdiTcbf~>-NB3?"D f>ƬzRpX.Y#? ם Ao`n< |]jȶ[5ec΁dgRq8mƓtyIhPfȫ"0(..~Z}RAm[A qh7rmQ8__(v p-)VXw+ Y݋e_Qc..LeB.Dlhw VA~#ǻ-0z:S{ m pݷL)/As} Rp/A$*Bh=C;mn͖s|(IJ+èf Zi/$yJbQX%>Ky<~:JЊ  +6 4~Ab W#ϹIL$@ͲysՐPU@|E.m Ҿ$5~ VMW)ؗ(g$H \y%K3ZL^jІ.CQq0y? 2>=>k$RBJ:LVX<]t>mIzMi riO^Ă`c;Vи'G`ӀZvinHyשA`D5g5_bK%`B&!gr@fV)G ZzßV#:~#`dtz >)Fư 7"QO58ny ޭ)Fw[. :eϻ%z1,MȦ,>)mBeg.Լ1|vاa*ai~Rh%8nfYjC&,=52YR&3KQR,K g4K.c)י`)e,֜Ù'Ҳ9\)d&o6Jzf T\v3䚎 +]߾ݶCjOXfo_` 5Å42nqk7d1P2mHAm>|d;p2_uney?Ŗ.{oqҳqm&B*DdV4y<#$CdiV{A* H#lK;cΟLMws\EB1U4r3˥GMI9FQr?A/~#Y?tXqUū'ڄakӧ-ΛhwѴm bM!wo_o<^[6Xy 㨟yQoz2ӻUX3wVE E5㾦-l\TmUM&I.Υ{Ӈ[z'[lӇo}rKɑ +u+~;7y N+xT=`w0d۩=Ywju{gx:lTZsp`#$clIá|U?  
ʌ!JKjL,9P2Ȗvye)JG*h0N+u m!l22=;갣 (e6m≇"n;ʰ'ofg涕,e&rPL+ǝq$8-DͭL"&a6o>)'^=-/-$7|{& /$g0mLnqk?w.H^K#41F-y}o> On)˨f=Vt\mw%SSL~v$WIcղDx Dka\|[MM3FЃnYC.jh J@t~15hߵƒYPg=x &-i~ VOlTWb?ΨfZ*TNM#8Ye ϐ0W^x0!( V6X2둆u8k̜G:-?&DDD!ݎZH$Ru7wR*݉XhCV(tYK90=PӎWaf%DnYt.yÚRn') u$+L/GI'ǩnm-<-3oX|ax^q7Zw7d4lF=.:<BWd6 պu}j#`Kg \Y#g V0`շFfcnoYN;U$m{7!{!TQ!K [Ky7 9(OZy JNJe*0.V 6yE7t3t:Rj[2Zn6BuD>o1C8 JHbmq=Bp&}3$Hm5Z0%8_jJ(NJ)͓F߃,OѸQ'e1#jQ\47T/IIvSi\oh$Hc_ 1wvjvB3ݯmVY8Xp@`:ZB )_:-ӮWgt.m}iU s/P" AКSպ_KBB+)Hݗ`Eep|D!qj=Vozjm?y3 1'U=/vtբG11R˼vѝӞE%%&#PT8R( eD `_TW K;r~v>ΙEG_⤾򕏣I?!Ww/l"Ͳy?4d^4 6WY3x),ha} syQ}̿f80ܿy7Xxt/="45qVK('M9v4i@[AY?;W͏7n?̸^W?k\-Q1|ޡk1I߲oe^O=g-gbG jҏm݌[W;HU$]=M„ !WN{Xk?Z(yw~saQ_>4] Z?\4< r4Xz"֮iXh- FsCWdd}$aK,U$b8~_۫F]Ս~hwm~ X Mn5qDE)\r.E5U]NȻ8Zǒ @L2o%,8#Ri/MXMu !s'Ռ3#KNN'仱mRpc FWȈLr" aٌ@RPʘd] f]lWpjNI+a]Ɲl.l{[LɫyBdjSW?I5dC%kHf?[I/pNjíp'G!K MF/wd6|{pd(Mn;9 궭VՒ$flTX$XXCJh []AGff,/2EWe}ӳ~zV;ҧj\_?<꽘s{h!~W=0B(W9zM׷> `.lnq/?{ANe_kLJ:jgV>zvḼbR+bv-vTs / oOi@c5?=|L>IP2ާ7((O!kxwԗS Z/ B"m!9uɵ$=d^q=-I_~=gD;)3nj^FC;WRZ=C@. d`,{AL ]4~U.d&YB=0fP5OЬNj-b$\/q;_>N/6P7fy瓄\ n"E/]F; $?⻻w_!}X}?J1*[#Rp {/?ُ7?~gVok恞LЖa.HÛn1i ^5wD]̕ 1{߯)2(p!#Ŀ IJ&[~ny`x- & Z Ɓ/*.*"!P\T} 0; Cё/YݥFO#̤wݥ `5B^#MS:z #U ;&tZKJ=/ڒ4zsP T"MYOgl¡'iq:~w7q([Q\ucM"TxFe͞~Jr?w\?hy?tjqʓd=S%jyI7˩9d+1PSyP' y&`SsW-fGnNwx/ҩ|07nI68jĹݬwK tR#ƻ1;wKn}X7іMaM\qZAG;WaWLthmyfyf@5/ma ZXBoR6b[54z;w|x;T("[%FIJ8R b[-?S)Jr19ȣ‚vaz0҈=B_K^EEEuq E`_*oXdW#7/3oF^-Vȍx2<{1HE{㕨"F R~ɩΙL˩i}~U&NJ z^R"uc ?[S8*ߢ Dcsr$nij_ S9Mz|.aMzn ֔zj]EeJP0ͭ܀̝X+Bj) EY)%QW >^maRA2&` $1"*<` Ι4+ .J`V,rtC^<-y |Q$d]Z* 9/l*'xQ@ ƀ<79=[V,Ӝ[0B 0gF1c9wjq Zf$1#j똍UuU_) 2R),9gU|K?1+r֊,| }Xh |e\>$_Qʎ0iBFn"\4sJ#M|Sy("v \\b@ԃJ%g2eEb(f9R0.9-\*e̮Id5jp_cwRj; U^H[RK>*tCC0l\ѱ"1XuGDHQs_D}p,yFʣJU$W ɩғ7ޝFͻ(@K-FGU[j=b]*!5LSf7:r25:̮ y&dSFN~%nѻbb:FHf8@ևqMf{F6zR11nc"mtssTymg:,U[VW^26hލk!lD5Tr/Z%nHévDdT/Z) ,)olץ_w9>#85},~["'nZJڼ !/sSUqi0w̜Ur>!_"11 &2 R͎ V ;F)hwW0BP/^vbjouGgGd{JA]Ȭo% ͑yCF4'!6`sO۠ԸqcJ^ Cl{,.Ϊ<3*NJJ3T9P:Vܔ*f–ԾHdG*~ޫ ^}N$$!":3VRDS,DV3a}^B@TsUQmQ䞀)*PWƌ!CgsQVNcg29A2WFxc%-&:^&}|}>(}jծ= QumCƽh@ ]\P%r4'9PDu;Z` F%a`oVE'};֓SiDMZ^Ȟr,%Ԏި&kp3똷tX@l|6R1G4h plo*HŨpaXpcX!6t 'UD.܀+ , 4CL˜aZ:ĊP+pcӻq(s@\Q8 M9iEg+/Ke:VktY!*xC**]f,\B(NmP2)6RUXY΁9Ɋj6'y\ mV BXaù"/"/dr^犼+tڍh%q󤍐f2 Jg^ kԛ:vϤew'i$Kl&5T(.lIqE2n!By*Uńr 3XfB{g/$z]lZ3l8of_on{Õ Oҙcטs#86zn('wIPi"QA8F =Mۈ|1(%!,u"[Q,?T鏴qf-v/˞w^/:Q{ԷÀ BLOC=cAБ?9<k8mx/0 #΁=t>9{o#" 44Pm9WauIKRS[1F ¬ሶ3?l_k=<EwwDn1z!(8Ll%ţG+#}*=m$aQAlV]bF9"˸s)i[@1yP fPrC%ϜC;ҹwQ# x}lDB:&פH-$5FE]jEZW/R)yKF2Om/, Wy,ͺ]&lqLvv0 aN3NH lJ,+IFĕV,i\liNˬA瘰[]ZZ=Ne-TwV(+f?;rnFóxwE(HB\Ȓal(nCx3/YKʏ9!W&xm2c[SsAPZ$D|)@l@Ah2}!Rs}iw@?)}P({d;JrhOoXnN}A;n) @ 1tbos̾T+-9fYӄ5B ]s؟gbXS k^Zcei9y1y!K"eiu.ޖCE 9*)  TLi8[wM5;ћOѧ[Ǭujqw;[+cGv]<l KV R~C+YQ{RZiU_b.eWu5|]^*V_<%/ j35ED}\b\f-޿I3oJ ;+ԕHO]:w|su(9R{X+~2zҏ(yKZP>TUg(3B.GOeEiPY2r^pLAa"cu(2@جz&1)?'4om<<O$ΙiötJ<kf= dL<xabj߈|t!4k"rBҐі6s;|p[r#4 [F/qٟQ A&){`Z 0{QNG6-o"d7͠$bdBOt.! 9p yӗ 8PK}Ւqg+=e+2Jy*R.ì ӶRPaV >SKAYi-KOJ Rak_ jVzH꫶ٻm%W9@JlTj+9}T $A[Hgsi"%(F4fKK9RV^kKI}-~^nP]@wRKI.y)q|xG]R¼4ayy)ghyqvVD\4lU_VTҰUԊ鼽4jNg$Uͭқ]`U5枥楄l^JXRK.&aZ烨UԊ_KKHWu& ɷVR)elT duc,hut߿?u4Yhrk21VG̮$h:{,Z*1n[^L-h=ڬ#N_ sc~|cZyD$8ܹZۃy(\x /?H=$TntM!IPuAZ]X`faaScYX8f-\ u@ށJyv! 
S;ER$MSז(Q@2#GrPrw)sצog](*6بWWLR}B=QB:z8JVrY?dfQ;ڨ(Hm'׼*>Fآ:c-λ|6Q1AvGnJ(n 'RXg_:7 *i)gXe(VBF$b7c6~f6`,xlŕbf#ڎ*SK(ߘةk?H+G;Zsol9w&y'үՍ Hz/-~+US2W!le{異?r(E h|,h]@^WzqbQDx앛]>Yx 9#ybܿXyk#UW j`?ޗ}HPO\:=>zk\q+o aj5}ly&b^In?7M)}X:oj Fؘ"bAm(~+œ{;uKZ-Gm;:/i;{0nv\΢D^z:4blL 1֩ظkJc 30X̾y"5Xw0|гf1:1}f=~ {6h {),FFK)${խb&V:0/<] 1v :`Ƽ[& ݱ2OԙkϮhEF#IyY'N$Jm&:>y{\^Ƚmٍ,,>5`$8>yL08;wIDyDaJ$ٟᕩA@xk2-1\Z1A%UO.a%^+4̇rSqK|*񼚜@T F(MrH %@yXq qRAI%inTkHé@",Ubg)f 񡝈&@RdS{:6 ҆; RN\_DTԪ QiRaw<fȤWkj+/z3O&k!fVW݌Ifד&}NgVXlKL(&+ɬ^\%1iL7@b $V)!nZJ K ͐ 4NvJQxDZco)W{w#hC>V@Eu$CYc`Kg_bnX0Mp4\Ǡ+ ؘf N\r9ͱL>&_sm>?c7BN*!գtYL%VnVdL@?ܽH{f&?\ʥ‘ Fݕ *bȇe^w'=("aF1P~5U:r?> #XgWZ>>{90 ;;IJWycMͻBb,$[8oG(ZHН߬ldSJxFƧ(Kű{Vw`bQ/F#ĞTȆ1ivmO\䳓 kci#uԪAD&,F^p睇6*ʙK 6*^a/gCF+a;%m*R%Ն|8%P%2*썹ri֐gY9p1B<>RV]S]jj,KxqQ'5C R1ODς]DW+#mӡ5IN8&vOosi9I VGeY5mH6{?Z3 } ydG6%/]L?ug BY,g 7rS,!smɏfAYl5!oED7Btkש2ѭCLTAgѭ~)Z`tkC޸)qF7 u>Ltqڎn9F&<䍻O@c]m#lX?;ohЀr׹f țf&7>d7>z)@#$wS6+^w+ՖUnX=xEe}CR|Q DTQb,^H RZxtҤ [qַ6hN:q&^~.MgTk#1(&Yno< Ȑǜdh@4ASrAsl|RP!bPFl ~/rm@j"rP(jd-2%"@^p&"P1J N{˹)0>(f}q/Д1f J Y, rjlUZ+adoRF1ckhE9F+I45rdƾa!y YqM3L*匐CMiP1M3޼H .|0"cozZ.EJS"fܔ RU&,=lvyF6 ǠrH=d K!t Ɣ.4 ;"w Df0eTÀ ebhABJEp~pi%F~enVEaNl礙c HT10It(2c 2)qA1. eom8r#yp?.|kjZ5BD6$eaUG9SWm\f5 őcn7e93"t^jb1D7J. Dz'E2m^o[<ڨƆN7[_>,үՍ kWr7:z6 ޽&;QFH (I)\'XǙ;̗Ee-,N,[lҵtVBR^螠n[ 0et Z V2&"gYD08JbaGdrAsn^nrP< bA&=?@N-xWx=|B΁g7Pb[ZyT.Y]D8*q:؉zckQSɆ&(}T#D 1BX!D%,V} c]WK[tNV0?{WHQ/4eyE?.0XK*w*IuL oPrٲ-˙Δ,:|/"ZoS[{/"̛m&oE2c/S"M"M"M"y4;`R#" &xkqG6}@dV-|V54_Z2%QC_誼Hs']/>'y˟,8d%~aKbGH f=wcEػjcPyY-;CJ m = x˫݌U N?xm}!>/{=;T%}Zӹ꽫{/ThoW=3qGO$adl[׎.OصAu M[Sr{aOXmz=x ÃC!&+3뎂ulw yq/4R]:v\ Dٱj^+Bu0kNuqx<1ӕF-{gaEݝ؊ߢޫqE݋8~(xO10fUki6=tF uGҌMmˌ2#b ʮ50| uV[5Tj>DZxY5T?^?F 4s&=F?ZktQWkݸ,Tf'/%-;B^F*9%R!R% k)M&GS="ýf5uOo)_X56?iժ) v#Ϯ Ě_'-ч#~nDKr-`p$ew^v ~v.l威l˽sVWvK]R+ʕ2t}3}~_Ǧy#O]2[dSL]ˮ)fAnG߻_~ w+ O UY.9K"~$04#ON|rBQvVh;P9\%i<9J񒬷U7ViPՠ@&I+r kgKJިo +l۱5Xnb7+V:s5KVӧBŇy40,?҇&ER&F8}L]=7/償td<98dkO !F9g(ȖuqV̅$ґ#'*# ܌Ouħ&%?.r 2X-Tba\ea3o#ȈhB> D*J|3 kC+V:EԤi1|*boټEmɼbE{r ꡸jlq]#lZ}:hpإc,G 1śgh(//fx5{&үr _GrTiQ@tJ"@ vP)2aW;DӁA_l<x7DաBi mnz}вkd9 W9ds$..@jrFGFH~<X;^hgSYYP˃@6PZq!qRbƷY4'Tݷӳi+PVdH!*Fh=tPu]/.fCa餕#HfjBRmZR+PK>i[qye7kV.OOR;$p1a pHeus)){Mv &YpAH qX^|تFp;8(~}a1aFpƑlX^Á5 +D8zcoS= լFѳãtBU*tE]o:n$.`zWzdĨKJOЃuyGQi_vQmw6G^rƵ7̙0_VzɪRPLjR~й*kBH:-Y1L ZĒPdVUձ;sriEZyV_1g|JBI!4)j"%Ffiۭs$2-ôzŸ,TFfϱ( 9xg DzlŽ&5|qmj,kVTd3X@B1{gy{Q 2;C50U`JDT&aj"P H"De[>䙡&Sxd$;:Qz} \PZ*5iL(>LZ˳_Ez]1Gfgs=L ,[{ً2| G;Wv9'vvZi bm atS"r?[Ywf,v)`β$g^ N;+0clU@+;ĂY(*=%6oeYKvΉZ/O)[աYY YP;Ip|%2i []Ђ}z*r!DH@hT9T ٿ ;Ɖ4Z-xl0oR: ' -+-BnBR!9N;".k5b[捼goe|ZykmHT/D:ApvcWD$S=EQMΐZLG3U}]USkp}1$2r^nH>\oLA<]N2m) &:w%.2ڎa8#jzaC \#UD`&Ne+G߆.KWz>HŸXV͵d_ɔ* ?D zBs,$701=B/R㞟hgRk\;?XXtE,9bJ>)-9d-j@n6vW>*B2b[§euMkX]ﺴGN8̏8P$VEn)2ܫ,u1+'8=nݚꠄ:Mw{Tlޛwk ԻNFڶ3M2:5AQ`lrgEH&r@s)ĩ/PLyt"̞xru^  ꅵCwx'g/9JSމX^+I}=ĊX^XEn=j8Xԩ շ3J~sZ#& s@4D=+I!I;y+K7PQJx~Sț:tu? |4N*$ˑXvϻRO`e!rǹ<n(ܱ.8>[Z u܇.ds8oe8"AZh)rt^%tD(D # C$-cCT K,&쁆+J/>~PeQTaUm]>xMGn!-}HBr,TP!~)4I9S%d\cl?Yuk\u+:K},Ra2geTpGy"nCJSVy+}t/Ď9V5H$yӫGwf+]5>B!-e"p@b-A2z(1t(o Bs0gz~,i"09Ga5~z ` nOL\Gr;ڃїOWa28㄀!@]BpM0:4&|Uh|O'n$7Hޛ9Q H.=f ɞ:Yj _z΄bgRkZMzL(1({KRN\j:RUR'k_uMNgrP;DNsD 9&cd&nIRh+ʄ6ʰ",; tf[Ø&AOGlGaL`xN&' oGLҨvk/647W;ZoJbŨH(}w$TF1̿;Xc64֘olijaߛ҂(~3X&wSC[D[bHH)N EMX'3RX-G/06zͪX;B$==p8Eff 1V$'oR$l1]^>/37W`:CMfڣN[[sF$D2E:9>ܢDPxx?>h-BĎvIl;O gq@ 1ır2$beB39R{9WVBu2/ xaL%V@2 '-1 #V>P𜻨C@Ĺ䓤 Rj0_P Q P#)AVfx$``Gck"X&ϏWp\tHQxt!)TD_Q1?>P!w/)yO|IG,brYe(apyyWMo? i﫲=J9&Il+0 l"LJFa͔}LȲ"(Ba.S J"T`FS9h5Lj^|;hq۪A<8i+\݁M i>iǍ*E1~A5TJRaJjFv ehYc.< g_kh'jt#"OߘDV4шh"Ժx;%RWRCO2s. 
var/home/core/zuul-output/logs/kubelet.log
Jan 28 15:16:54 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 28 15:16:54 crc restorecon[4692]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 
28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 28 15:16:54 crc 
restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c15 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:54 crc restorecon[4692]: 
/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc 
restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 28 15:16:54 
crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 15:16:54 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28
15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc 
restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 
28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c466,c972 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 
15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:55 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 28 15:16:56 crc restorecon[4692]:
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 28 15:16:56 crc restorecon[4692]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
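[Editor's note: every restorecon entry above has the single fixed shape "<path> not reset as customized by admin to <context>", where the MCS category pair in the context (e.g. s0:c7,c13) differs per pod. A minimal Python sketch for summarizing these entries; "kubelet.log" is a hypothetical file name for a saved copy of this log, and the regex is derived only from the line shape visible here.]

import re
from collections import Counter

# Shape of each restorecon entry above (one entry per line):
#   Jan 28 15:16:56 crc restorecon[4692]: <path> not reset as customized by admin to <context>
ENTRY = re.compile(
    r"restorecon\[\d+\]: (?P<path>\S+) not reset as customized by admin to (?P<context>\S+)"
)

def contexts_by_count(log_text: str) -> Counter:
    """Count how many skipped paths carry each SELinux context (e.g. ...:s0:c7,c13)."""
    return Counter(m.group("context") for m in ENTRY.finditer(log_text))

if __name__ == "__main__":
    with open("kubelet.log") as f:  # hypothetical file name
        for ctx, n in contexts_by_count(f.read()).most_common():
            print(f"{n:5d}  {ctx}")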
Jan 28 15:17:00 crc kubenswrapper[4959]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 15:17:00 crc kubenswrapper[4959]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 28 15:17:00 crc kubenswrapper[4959]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 15:17:00 crc kubenswrapper[4959]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 28 15:17:00 crc kubenswrapper[4959]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 28 15:17:00 crc kubenswrapper[4959]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
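[Editor's note: the warnings above point at the kubelet config file (--config=/etc/kubernetes/kubelet.conf in the flag dump further down). As an illustration only, not the contents of this cluster's actual kubelet.conf: the deprecated flags correspond to fields of the kubelet.config.k8s.io/v1beta1 KubeletConfiguration schema, with values mirroring the FLAG dump below.]

import json

# Hypothetical sketch of the config-file equivalents of the deprecated flags.
# Field names assume the kubelet.config.k8s.io/v1beta1 schema; values are
# copied from the FLAG dump later in this log.
kubelet_config = {
    "apiVersion": "kubelet.config.k8s.io/v1beta1",
    "kind": "KubeletConfiguration",
    "containerRuntimeEndpoint": "/var/run/crio/crio.sock",            # --container-runtime-endpoint
    "volumePluginDir": "/etc/kubernetes/kubelet-plugins/volume/exec",  # --volume-plugin-dir
    "registerWithTaints": [                                            # --register-with-taints
        {"key": "node-role.kubernetes.io/master", "effect": "NoSchedule"}
    ],
    "systemReserved": {                                                # --system-reserved
        "cpu": "200m", "ephemeral-storage": "350Mi", "memory": "350Mi"
    },
}

# YAML is a superset of JSON, so this output is already a readable config file.
print(json.dumps(kubelet_config, indent=2))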
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.107152 4959 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112921 4959 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112949 4959 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112953 4959 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112957 4959 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112961 4959 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112967 4959 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112972 4959 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112976 4959 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112980 4959 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112984 4959 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112988 4959 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112992 4959 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112995 4959 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.112999 4959 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113003 4959 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113007 4959 feature_gate.go:330] unrecognized feature gate: Example
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113010 4959 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113014 4959 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113017 4959 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113021 4959 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113024 4959 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113028 4959 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113032 4959 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113037 4959 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113041 4959 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113045 4959 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113048 4959 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113052 4959 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113055 4959 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113060 4959 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113064 4959 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113069 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113073 4959 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113082 4959 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113086 4959 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113090 4959 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113094 4959 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113098 4959 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113126 4959 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113130 4959 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113134 4959 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113137 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113141 4959 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113144 4959 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113148 4959 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113151 4959 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113155 4959 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113159 4959 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113163 4959 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113167 4959 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113171 4959 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113174 4959 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113178 4959 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113183 4959 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113188 4959 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113192 4959 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113196 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113200 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113203 4959 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113207 4959 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113211 4959 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113214 4959 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113219 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113223 4959 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113226 4959 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113230 4959 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113234 4959 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113238 4959 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113242 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113246 4959 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.113249 4959 feature_gate.go:330] unrecognized feature gate: SignatureStores
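[Editor's note: two machine-parseable patterns dominate this startup block: the feature_gate.go warnings above and the flags.go:64 dump that follows. A minimal Python sketch for both; "kubelet.log" is a hypothetical name for a saved copy of this log, and the regexes are taken directly from the line shapes visible here.]

import re

# Line shapes observed in this log:
#   W0128 15:17:00.112921 4959 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
#   I0128 15:17:00.114254 4959 flags.go:64] FLAG: --address="0.0.0.0"
GATE = re.compile(r"feature_gate\.go:330\] unrecognized feature gate: (\w+)")
FLAG = re.compile(r'flags\.go:64\] FLAG: --([\w-]+)="(.*?)"')

def unknown_gates(text: str) -> list[str]:
    # The same set of gates is logged twice in this startup; dedupe and sort it.
    return sorted(set(GATE.findall(text)))

def startup_flags(text: str) -> dict[str, str]:
    # Map each kubelet flag name to the value it was started with.
    return dict(FLAG.findall(text))

if __name__ == "__main__":
    with open("kubelet.log") as f:  # hypothetical file name, as above
        text = f.read()
    print(len(unknown_gates(text)), "distinct unrecognized feature gates")
    print(startup_flags(text).get("node-ip"))  # 192.168.126.11 in this log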
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114254 4959 flags.go:64] FLAG: --address="0.0.0.0"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114271 4959 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114298 4959 flags.go:64] FLAG: --anonymous-auth="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114313 4959 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114319 4959 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114324 4959 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114330 4959 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114337 4959 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114341 4959 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114346 4959 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114350 4959 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114354 4959 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114359 4959 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114363 4959 flags.go:64] FLAG: --cgroup-root=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114368 4959 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114372 4959 flags.go:64] FLAG: --client-ca-file=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114377 4959 flags.go:64] FLAG: --cloud-config=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114381 4959 flags.go:64] FLAG: --cloud-provider=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114385 4959 flags.go:64] FLAG: --cluster-dns="[]"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114391 4959 flags.go:64] FLAG: --cluster-domain=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114395 4959 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114400 4959 flags.go:64] FLAG: --config-dir=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114405 4959 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114410 4959 flags.go:64] FLAG: --container-log-max-files="5"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114416 4959 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114420 4959 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114425 4959 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114474 4959 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114480 4959 flags.go:64] FLAG: --contention-profiling="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114484 4959 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114488 4959 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114522 4959 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114532 4959 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114540 4959 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114546 4959 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114552 4959 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114557 4959 flags.go:64] FLAG: --enable-load-reader="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114563 4959 flags.go:64] FLAG: --enable-server="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114569 4959 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114581 4959 flags.go:64] FLAG: --event-burst="100"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114587 4959 flags.go:64] FLAG: --event-qps="50"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114592 4959 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114598 4959 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114604 4959 flags.go:64] FLAG: --eviction-hard=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114611 4959 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114616 4959 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114621 4959 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114626 4959 flags.go:64] FLAG: --eviction-soft=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114631 4959 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114637 4959 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114642 4959 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114648 4959 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114654 4959 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114661 4959 flags.go:64] FLAG: --fail-swap-on="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114667 4959 flags.go:64] FLAG: --feature-gates=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114673 4959 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114678 4959 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114684 4959 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114689 4959 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114694 4959 flags.go:64] FLAG: --healthz-port="10248"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114699 4959 flags.go:64] FLAG: --help="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114704 4959 flags.go:64] FLAG: --hostname-override=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114711 4959 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114716 4959 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114723 4959 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114728 4959 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114732 4959 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114737 4959 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114742 4959 flags.go:64] FLAG: --image-service-endpoint=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114748 4959 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114754 4959 flags.go:64] FLAG: --kube-api-burst="100"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114760 4959 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114766 4959 flags.go:64] FLAG: --kube-api-qps="50"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114772 4959 flags.go:64] FLAG: --kube-reserved=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114779 4959 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114825 4959 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114830 4959 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114836 4959 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114842 4959 flags.go:64] FLAG: --lock-file=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114847 4959 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114853 4959 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114858 4959 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114867 4959 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114872 4959 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114878 4959 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114884 4959 flags.go:64] FLAG: --logging-format="text"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114889 4959 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114894 4959 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114898 4959 flags.go:64] FLAG: --manifest-url=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114903 4959 flags.go:64] FLAG: --manifest-url-header=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114908 4959 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114914 4959 flags.go:64] FLAG: --max-open-files="1000000"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114919 4959 flags.go:64] FLAG: --max-pods="110"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114924 4959 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114929 4959 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114933 4959 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114937 4959 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114941 4959 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114946 4959 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114951 4959 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114962 4959 flags.go:64] FLAG: --node-status-max-images="50"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114966 4959 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114970 4959 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114974 4959 flags.go:64] FLAG: --pod-cidr=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114978 4959 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114985 4959 flags.go:64] FLAG: --pod-manifest-path=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114989 4959 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114993 4959 flags.go:64] FLAG: --pods-per-core="0"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.114997 4959 flags.go:64] FLAG: --port="10250"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115002 4959 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115006 4959 flags.go:64] FLAG: --provider-id=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115010 4959 flags.go:64] FLAG: --qos-reserved=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115014 4959 flags.go:64] FLAG: --read-only-port="10255"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115018 4959 flags.go:64] FLAG: --register-node="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115023 4959 flags.go:64] FLAG: --register-schedulable="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115027 4959 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115034 4959 flags.go:64] FLAG: --registry-burst="10"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115039 4959 flags.go:64] FLAG: --registry-qps="5"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115043 4959 flags.go:64] FLAG: --reserved-cpus=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115047 4959 flags.go:64] FLAG: --reserved-memory=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115053 4959 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115057 4959 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115061 4959 flags.go:64] FLAG: --rotate-certificates="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115066 4959 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115070 4959 flags.go:64] FLAG: --runonce="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115074 4959 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115078 4959 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115082 4959 flags.go:64] FLAG: --seccomp-default="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115086 4959 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115091 4959 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115095 4959 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115099 4959 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115120 4959 flags.go:64] FLAG: --storage-driver-password="root"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115126 4959 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115131 4959 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115135 4959 flags.go:64] FLAG: --storage-driver-user="root"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115139 4959 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115143 4959 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115147 4959 flags.go:64] FLAG: --system-cgroups=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115151 4959 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115158 4959 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115162 4959 flags.go:64] FLAG: --tls-cert-file=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115166 4959 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115171 4959 flags.go:64] FLAG: --tls-min-version=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115175 4959 flags.go:64] FLAG: --tls-private-key-file=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115180 4959 flags.go:64] FLAG: --topology-manager-policy="none"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115185 4959 flags.go:64] FLAG: --topology-manager-policy-options=""
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115189 4959 flags.go:64] FLAG: --topology-manager-scope="container"
FLAG: --v="2" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115199 4959 flags.go:64] FLAG: --version="false" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115205 4959 flags.go:64] FLAG: --vmodule="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115211 4959 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.115215 4959 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115316 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115321 4959 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115326 4959 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115329 4959 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115333 4959 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115337 4959 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115341 4959 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115345 4959 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115349 4959 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115352 4959 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115356 4959 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115359 4959 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115363 4959 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115366 4959 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115370 4959 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115373 4959 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115377 4959 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115381 4959 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115386 4959 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115389 4959 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115393 4959 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115397 4959 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 
15:17:00.115401 4959 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115404 4959 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115408 4959 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115411 4959 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115415 4959 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115418 4959 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115423 4959 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115426 4959 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115430 4959 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115434 4959 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115438 4959 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115441 4959 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115445 4959 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115448 4959 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115452 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115457 4959 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115460 4959 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115464 4959 feature_gate.go:330] unrecognized feature gate: Example Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115467 4959 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115471 4959 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115475 4959 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115478 4959 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115482 4959 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115485 4959 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115489 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115492 4959 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 28 15:17:00 crc 
kubenswrapper[4959]: W0128 15:17:00.115496 4959 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115500 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115504 4959 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115507 4959 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115511 4959 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115515 4959 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115518 4959 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115522 4959 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115525 4959 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115530 4959 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115534 4959 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115538 4959 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115543 4959 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115548 4959 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115553 4959 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115558 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115562 4959 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115566 4959 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115571 4959 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115598 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115602 4959 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115606 4959 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.115609 4959 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.116575 4959 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.133854 4959 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.133922 4959 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134079 4959 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134093 4959 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134099 4959 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134128 4959 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134135 4959 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134140 4959 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134145 4959 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134150 4959 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134156 4959 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134166 4959 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134171 4959 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134175 4959 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134180 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134185 4959 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134189 4959 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134194 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134198 4959 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134203 4959 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134208 4959 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134213 4959 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134219 4959 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134224 4959 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134229 4959 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134235 4959 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134241 4959 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134248 4959 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134254 4959 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134259 4959 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134264 4959 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134268 4959 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134273 4959 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134277 4959 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134282 4959 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134287 4959 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134294 4959 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134299 4959 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134303 4959 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134309 4959 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134313 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134318 4959 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134323 4959 feature_gate.go:330] unrecognized feature gate: Example Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134327 4959 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134332 4959 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134336 4959 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134341 4959 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134345 4959 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134350 4959 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134355 4959 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134362 4959 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134368 4959 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134373 4959 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134378 4959 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134383 4959 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134389 4959 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134393 4959 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134398 4959 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134402 4959 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134407 4959 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134410 4959 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134415 4959 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134419 4959 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134423 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134427 4959 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134434 4959 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134438 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134442 4959 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134446 4959 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134450 4959 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134454 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134458 4959 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134463 4959 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.134472 4959 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false 
UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134769 4959 feature_gate.go:330] unrecognized feature gate: Example Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134778 4959 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134782 4959 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134786 4959 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134791 4959 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134795 4959 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134799 4959 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134803 4959 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134809 4959 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134816 4959 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134822 4959 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134826 4959 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134831 4959 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134837 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134842 4959 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134846 4959 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134852 4959 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134856 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134861 4959 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134890 4959 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134895 4959 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134899 4959 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134902 4959 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134906 4959 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134910 4959 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. 
It will be removed in a future release. Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134915 4959 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134918 4959 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134922 4959 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134927 4959 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134931 4959 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134934 4959 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134938 4959 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134942 4959 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134945 4959 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134949 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134953 4959 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134957 4959 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134961 4959 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134967 4959 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134970 4959 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134974 4959 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134978 4959 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134982 4959 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134986 4959 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134989 4959 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.134995 4959 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135000 4959 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135004 4959 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135007 4959 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135011 4959 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135015 4959 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135020 4959 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135025 4959 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135030 4959 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135034 4959 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135038 4959 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135043 4959 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135047 4959 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135080 4959 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135085 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135089 4959 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135093 4959 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135097 4959 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135101 4959 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135156 4959 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135163 4959 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135167 4959 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135172 4959 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135177 4959 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135182 4959 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.135190 4959 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.135198 4959 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.138619 4959 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.143775 4959 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.143911 4959 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
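The "FLAG: --name=value" lines near the top of this section are the kubelet dumping every registered command-line flag, defaults included, at startup (flags.go:64). Below is a minimal sketch of the same pattern using Go's standard flag package; the real kubelet registers its flags through the spf13/pflag library, and the two flags here are only illustrative.

package main

import (
	"flag"
	"log"
)

func main() {
	// Two illustrative flags; the kubelet registers dozens
	// (--healthz-port, --max-pods, --node-ip, ...).
	flag.Int("healthz-port", 10248, "port for the healthz endpoint")
	flag.Int("max-pods", 110, "maximum number of pods per node")
	flag.Parse()

	// Dump every registered flag, one per line, in the same
	// "FLAG: --name=value" shape seen in this log.
	flag.VisitAll(func(f *flag.Flag) {
		log.Printf("FLAG: --%s=%q", f.Name, f.Value.String())
	})
}

flag.VisitAll walks flags in lexicographical order, which matches the near-alphabetical ordering of the dump above.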
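The long runs of "unrecognized feature gate" warnings arise because OpenShift-level gate names (GatewayAPI, OnClusterBuild, and so on) are handed to a component that only knows the upstream Kubernetes gate table: unknown names are warned about and skipped, and the known ones end up in the effective map that feature_gate.go:386 prints. A rough sketch of that pattern, assuming a tiny stand-in gate table rather than the real set:

package main

import "log"

// knownGates is a stand-in for the kubelet's gate table; the real set
// lives in k8s.io/component-base/featuregate and is much larger.
var knownGates = map[string]bool{
	"CloudDualStackNodeIPs": true,
	"KMSv1":                 true,
	"NodeSwap":              true,
}

// applyGates mirrors the behavior visible in the log: unknown names draw
// a warning and are dropped; known names are recorded with their value.
func applyGates(requested map[string]bool) map[string]bool {
	effective := map[string]bool{}
	for name, enabled := range requested {
		if !knownGates[name] {
			log.Printf("unrecognized feature gate: %s", name)
			continue
		}
		effective[name] = enabled
	}
	return effective
}

func main() {
	log.Printf("feature gates: %v", applyGates(map[string]bool{
		"KMSv1":      true,
		"NodeSwap":   false,
		"GatewayAPI": true, // OpenShift-only name, unknown to this table
	}))
}

The parse evidently runs more than once during startup, which is why the identical effective map appears three times in this log.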
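certificate_store.go has just loaded the current client certificate, and the certificate_manager lines that follow print its expiry alongside a jittered rotation deadline. A sketch of how such a deadline can be derived: pick a random point late in the certificate's validity window so a fleet of kubelets does not all rotate at once. The 70 to 90 percent window below is an assumed stand-in, not the kubelet's actual jitter constant.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random instant somewhere in the last stretch
// of the certificate's lifetime. The 0.7-0.9 window is an assumption for
// illustration; the real certificate manager uses its own constants.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	offset := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(offset)
}

func main() {
	notBefore := time.Now()
	notAfter := notBefore.Add(90 * 24 * time.Hour) // a hypothetical 90-day cert
	fmt.Println("expiration:        ", notAfter.Format(time.RFC3339))
	fmt.Println("rotation deadline: ", rotationDeadline(notBefore, notAfter).Format(time.RFC3339))
}

The scheme is consistent with what the log shows next: a deadline placed well before expiry (2025-12-28 for a certificate that expires 2026-02-24).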
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.145582 4959 server.go:997] "Starting client certificate rotation" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.145617 4959 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.145815 4959 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-28 17:44:44.301618344 +0000 UTC Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.145957 4959 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.179236 4959 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.180834 4959 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.182585 4959 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.212214 4959 log.go:25] "Validated CRI v1 runtime API" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.337569 4959 log.go:25] "Validated CRI v1 image API" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.352374 4959 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.356878 4959 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-28-15-12-06-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.356913 4959 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}] Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.388212 4959 manager.go:217] Machine: {Timestamp:2026-01-28 15:17:00.371406502 +0000 UTC m=+3.817312915 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:dc1a7e00-f904-4756-a6c3-34f447e56131 BootID:a9652fc7-854b-4938-b708-3f704c68c5f5 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 
Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:e3:e1:88 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:e3:e1:88 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:56:0f:42 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:a8:33:81 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:ba:12:fc Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:d4:a7:cf Speed:-1 Mtu:1496} {Name:eth10 MacAddress:92:49:46:ad:9e:86 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:02:27:e0:cd:7a:a7 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] 
Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.388581 4959 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.388828 4959 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.390126 4959 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.390455 4959 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.390544 4959 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.391705 4959 topology_manager.go:138] "Creating topology manager with none policy" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.391807 4959 
container_manager_linux.go:303] "Creating device plugin manager" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.392474 4959 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.392567 4959 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.392868 4959 state_mem.go:36] "Initialized new in-memory state store" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.393028 4959 server.go:1245] "Using root directory" path="/var/lib/kubelet" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.397182 4959 kubelet.go:418] "Attempting to sync node with API server" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.397279 4959 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.397413 4959 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.397513 4959 kubelet.go:324] "Adding apiserver pod source" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.397598 4959 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.401754 4959 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.403253 4959 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.405760 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.405909 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.405777 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.406100 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.425406 4959 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433325 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Jan 28 15:17:00 crc 
kubenswrapper[4959]: I0128 15:17:00.433411 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433421 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433429 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433443 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433451 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433460 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433473 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433489 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433498 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433513 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.433521 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.434524 4959 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.435202 4959 server.go:1280] "Started kubelet"
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.435354 4959 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.435416 4959 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.437032 4959 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused
Jan 28 15:17:00 crc systemd[1]: Started Kubernetes Kubelet.
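The Container Manager configuration logged above carries the node's resource bookkeeping inputs: SystemReserved cpu=200m and memory=350Mi, an empty KubeReserved, and a memory.available hard-eviction threshold of 100Mi, set against the machine's 33654128640 bytes of memory. Allocatable capacity is what remains after subtracting the reservations and the eviction threshold; a worked sketch using exactly the values from this log:

package main

import "fmt"

// Node allocatable, as the kubelet computes it for memory:
//   allocatable = capacity - kube-reserved - system-reserved - hard-eviction
// The numbers below are the ones logged by this kubelet.
func main() {
	const mi = int64(1024 * 1024)
	capacity := int64(33654128640) // MemoryCapacity from the Machine line
	kubeReserved := int64(0)       // --kube-reserved is empty in this config
	systemReserved := 350 * mi     // --system-reserved memory=350Mi
	hardEviction := 100 * mi       // memory.available threshold of 100Mi
	allocatable := capacity - kubeReserved - systemReserved - hardEviction
	fmt.Printf("allocatable memory: %d bytes (~%.2f GiB)\n",
		allocatable, float64(allocatable)/float64(1024*mi))
}

With these inputs the sketch prints 33182269440 bytes, roughly 30.9 GiB left for pods after the 450Mi of reservations and threshold are set aside.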
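Every "connection refused" error in this stretch, from the CSR post and the reflector list calls to the CSINode lookup just above and the lease request below, is the same symptom: the kubelet came up before the apiserver at api-int.crc.testing:6443 was answering, and each client loop simply retries on its own schedule (the lease controller below logs interval="200ms"). A minimal dial-probe sketch of that retry shape; the address comes from the log, while the attempt count and timeouts are arbitrary.

package main

import (
	"log"
	"net"
	"time"
)

// probeAPIServer retries a TCP dial until the endpoint accepts a
// connection or the attempts run out, logging each failure the way the
// kubelet's client loops do.
func probeAPIServer(addr string, attempts int, backoff time.Duration) bool {
	for i := 0; i < attempts; i++ {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			conn.Close()
			return true
		}
		log.Printf("dial %s failed (%v), retrying in %s", addr, err, backoff)
		time.Sleep(backoff)
	}
	return false
}

func main() {
	if probeAPIServer("api-int.crc.testing:6443", 5, 200*time.Millisecond) {
		log.Print("apiserver reachable")
	} else {
		log.Print("giving up; the kubelet itself would keep retrying")
	}
}

The kubelet's real clients sit behind client-go's informer and lease machinery, which use exponential backoff rather than a fixed sleep; the fixed interval here just keeps the sketch short.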
Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.438017 4959 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.440420 4959 server.go:460] "Adding debug handlers to kubelet server" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.441645 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.441679 4959 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.442118 4959 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.442205 4959 volume_manager.go:287] "The desired_state_of_world populator starts" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.442213 4959 volume_manager.go:289] "Starting Kubelet Volume Manager" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.442360 4959 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.442621 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 14:05:30.448338528 +0000 UTC Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.542284 4959 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.545569 4959 factory.go:55] Registering systemd factory Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.545601 4959 factory.go:221] Registration of the systemd container factory successfully Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.545904 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="200ms" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.545952 4959 factory.go:153] Registering CRI-O factory Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.545974 4959 factory.go:221] Registration of the crio container factory successfully Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.546029 4959 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.546056 4959 factory.go:103] Registering Raw factory Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.546073 4959 manager.go:1196] Started watching for new ooms in manager Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.546242 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.546361 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.545949 4959 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.107:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188eee07b2cd4f7c default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 15:17:00.435165052 +0000 UTC m=+3.881071435,LastTimestamp:2026-01-28 15:17:00.435165052 +0000 UTC m=+3.881071435,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.547213 4959 manager.go:319] Starting recovery of all containers Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554455 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554499 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554508 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554517 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554526 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554534 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554543 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554552 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.554563 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556264 4959 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556291 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556306 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556317 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556329 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556341 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556350 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556360 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556369 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 28 15:17:00 crc 
kubenswrapper[4959]: I0128 15:17:00.556377 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556388 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556398 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556407 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556417 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556425 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556436 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556447 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556469 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556480 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556490 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556499 
4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556508 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556518 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556528 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556537 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556545 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556555 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556564 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556573 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556582 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556591 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556600 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556609 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556621 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556631 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556640 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556649 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556658 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556666 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556690 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556700 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556710 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556731 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556744 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556763 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556776 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556786 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556794 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556805 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556814 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556823 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556832 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556841 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556851 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556873 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556883 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556892 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556901 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556910 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556918 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556926 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556937 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556945 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556953 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556962 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556970 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556979 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556989 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.556997 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557004 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557012 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557021 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557030 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557038 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557048 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557056 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557064 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557073 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557081 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557090 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557099 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557138 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557151 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557164 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557175 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557194 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557207 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557218 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557227 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557243 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557252 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557261 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557270 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557279 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557288 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557297 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557358 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557370 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557380 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557390 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557399 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557409 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557438 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557448 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557458 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557467 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557476 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557495 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557505 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557514 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557523 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557531 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557538 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557547 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557564 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557590 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557603 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557611 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557619 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557626 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557643 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557652 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557660 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557668 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557687 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557695 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557713 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557735 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557748 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557758 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557771 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557783 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557796 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557807 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557819 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557828 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557836 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557844 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557852 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557860 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557868 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557876 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" 
volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557885 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557896 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557904 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557912 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557921 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557930 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557938 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557946 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557954 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557961 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557970 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" 
volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557978 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557986 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.557994 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558002 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558010 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558018 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558026 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558034 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558041 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558050 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558059 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558068 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558076 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558086 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558094 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558123 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558143 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558153 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558166 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558175 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558189 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558214 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558245 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558255 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558263 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558273 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558282 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558291 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558300 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558309 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558317 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558325 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558334 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" 
volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558343 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558352 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558360 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558368 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558377 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558386 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558395 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558405 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558413 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558422 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558431 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558442 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558454 4959 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558463 4959 reconstruct.go:97] "Volume reconstruction finished" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.558470 4959 reconciler.go:26] "Reconciler: start to sync state" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.567921 4959 manager.go:324] Recovery completed Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.579556 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.581304 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.581331 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.581341 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.583851 4959 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.583880 4959 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.583907 4959 state_mem.go:36] "Initialized new in-memory state store" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.584169 4959 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.585765 4959 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.585796 4959 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.585822 4959 kubelet.go:2335] "Starting kubelet main sync loop" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.585871 4959 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 28 15:17:00 crc kubenswrapper[4959]: W0128 15:17:00.586503 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.586572 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.599044 4959 policy_none.go:49] "None policy: Start" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.600027 4959 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.600079 4959 state_mem.go:35] "Initializing new in-memory state store" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.642564 4959 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.649609 4959 manager.go:334] "Starting Device Plugin manager" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.649792 4959 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.649824 4959 server.go:79] "Starting device plugin registration server" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.650335 4959 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.650357 4959 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.650748 4959 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.650868 4959 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.650876 4959 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.657094 4959 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.687195 4959 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 28 15:17:00 crc kubenswrapper[4959]: 
I0128 15:17:00.687343 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.688645 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.688890 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.688986 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.689254 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.690553 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.690606 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.690612 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.690650 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.690620 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.690902 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691081 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691152 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691504 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691535 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691549 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691753 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691767 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691851 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691952 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.691992 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692238 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692262 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692276 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692476 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692504 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692516 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692609 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692702 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692617 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692738 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692752 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.692764 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.693242 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.693265 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.693273 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.693417 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.693440 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.694013 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.694025 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.694042 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.694046 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.694128 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.694051 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.746798 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="400ms" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.750840 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.752014 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.752046 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.752058 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.752085 4959 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.752637 4959 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.107:6443: connect: connection refused" node="crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.759692 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.759729 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 
15:17:00.759753 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.759770 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.759786 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760232 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760301 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760350 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760376 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760426 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760456 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760481 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760508 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760535 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.760557 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861629 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861686 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861711 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861730 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861744 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861759 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861774 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861782 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861812 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861807 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861836 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861843 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861788 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861865 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861870 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861873 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861889 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861894 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861909 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861926 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861924 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861941 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861952 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861954 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861971 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861982 4959 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861986 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861991 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.861892 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.862079 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.953183 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.954380 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.954418 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.954431 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:00 crc kubenswrapper[4959]: I0128 15:17:00.954475 4959 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 15:17:00 crc kubenswrapper[4959]: E0128 15:17:00.954982 4959 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.107:6443: connect: connection refused" node="crc" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.015889 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.033550 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.039651 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.056004 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.060728 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:01 crc kubenswrapper[4959]: E0128 15:17:01.147846 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="800ms" Jan 28 15:17:01 crc kubenswrapper[4959]: W0128 15:17:01.251372 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-e5490218b8b05571243864d11d2a88033b74d5fb4450b58b29389062b45529a2 WatchSource:0}: Error finding container e5490218b8b05571243864d11d2a88033b74d5fb4450b58b29389062b45529a2: Status 404 returned error can't find the container with id e5490218b8b05571243864d11d2a88033b74d5fb4450b58b29389062b45529a2 Jan 28 15:17:01 crc kubenswrapper[4959]: W0128 15:17:01.251786 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-1c27eac2393ab5b925a656d75b50c68b930e9db16837ea2a76679fb1ea106640 WatchSource:0}: Error finding container 1c27eac2393ab5b925a656d75b50c68b930e9db16837ea2a76679fb1ea106640: Status 404 returned error can't find the container with id 1c27eac2393ab5b925a656d75b50c68b930e9db16837ea2a76679fb1ea106640 Jan 28 15:17:01 crc kubenswrapper[4959]: W0128 15:17:01.252434 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-661ada391f3c6fba1f0705386b4271d1eb0376668049a6a994c461c2a099fd6a WatchSource:0}: Error finding container 661ada391f3c6fba1f0705386b4271d1eb0376668049a6a994c461c2a099fd6a: Status 404 returned error can't find the container with id 661ada391f3c6fba1f0705386b4271d1eb0376668049a6a994c461c2a099fd6a Jan 28 15:17:01 crc kubenswrapper[4959]: W0128 15:17:01.253486 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-90c2949e444ee47b9d1817d2ccb857b9031eb82410de34221ccbc0063932fab2 WatchSource:0}: Error finding container 90c2949e444ee47b9d1817d2ccb857b9031eb82410de34221ccbc0063932fab2: Status 404 returned error can't find the container with id 90c2949e444ee47b9d1817d2ccb857b9031eb82410de34221ccbc0063932fab2 Jan 28 15:17:01 crc kubenswrapper[4959]: W0128 15:17:01.255512 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-b910c1d48fa6f4eb31400571b74c3a33c873cc79bb515cc0227cd87e2a962f94 WatchSource:0}: Error finding container b910c1d48fa6f4eb31400571b74c3a33c873cc79bb515cc0227cd87e2a962f94: Status 404 returned error can't find the container with id b910c1d48fa6f4eb31400571b74c3a33c873cc79bb515cc0227cd87e2a962f94 Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.355871 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.357361 4959 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.357398 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.357408 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.357440 4959 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 15:17:01 crc kubenswrapper[4959]: E0128 15:17:01.358365 4959 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.107:6443: connect: connection refused" node="crc" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.438308 4959 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.443492 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 10:53:23.908145457 +0000 UTC Jan 28 15:17:01 crc kubenswrapper[4959]: W0128 15:17:01.467792 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:01 crc kubenswrapper[4959]: E0128 15:17:01.467899 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:01 crc kubenswrapper[4959]: W0128 15:17:01.539157 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:01 crc kubenswrapper[4959]: E0128 15:17:01.539298 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:01 crc kubenswrapper[4959]: W0128 15:17:01.547605 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:01 crc kubenswrapper[4959]: E0128 15:17:01.547703 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" 
logger="UnhandledError" Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.590825 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"661ada391f3c6fba1f0705386b4271d1eb0376668049a6a994c461c2a099fd6a"} Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.592669 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"90c2949e444ee47b9d1817d2ccb857b9031eb82410de34221ccbc0063932fab2"} Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.593706 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b910c1d48fa6f4eb31400571b74c3a33c873cc79bb515cc0227cd87e2a962f94"} Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.594691 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1c27eac2393ab5b925a656d75b50c68b930e9db16837ea2a76679fb1ea106640"} Jan 28 15:17:01 crc kubenswrapper[4959]: I0128 15:17:01.595876 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e5490218b8b05571243864d11d2a88033b74d5fb4450b58b29389062b45529a2"} Jan 28 15:17:01 crc kubenswrapper[4959]: E0128 15:17:01.949295 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="1.6s" Jan 28 15:17:02 crc kubenswrapper[4959]: E0128 15:17:02.054490 4959 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.107:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188eee07b2cd4f7c default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 15:17:00.435165052 +0000 UTC m=+3.881071435,LastTimestamp:2026-01-28 15:17:00.435165052 +0000 UTC m=+3.881071435,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.158751 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.160564 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.160596 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.160606 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 
15:17:02.160639 4959 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 15:17:02 crc kubenswrapper[4959]: E0128 15:17:02.161227 4959 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.107:6443: connect: connection refused" node="crc" Jan 28 15:17:02 crc kubenswrapper[4959]: W0128 15:17:02.184893 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:02 crc kubenswrapper[4959]: E0128 15:17:02.184991 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.356599 4959 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 15:17:02 crc kubenswrapper[4959]: E0128 15:17:02.359187 4959 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.438708 4959 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.444046 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 12:29:23.182721849 +0000 UTC Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.599784 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250"} Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.599831 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc"} Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.601167 4959 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434" exitCode=0 Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.601212 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434"} Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 
15:17:02.601293 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.601996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.602020 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.602033 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.603837 4959 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688" exitCode=0 Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.603894 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688"} Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.603931 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.604911 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.604953 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.604963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.606975 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.607152 4959 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a" exitCode=0 Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.607240 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.607261 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a"} Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.607867 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.607893 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.607901 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.608193 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.608226 4959 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.608239 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.608615 4959 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288" exitCode=0 Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.608649 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288"} Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.608968 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.609852 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.609875 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:02 crc kubenswrapper[4959]: I0128 15:17:02.609883 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:03 crc kubenswrapper[4959]: W0128 15:17:03.279553 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:03 crc kubenswrapper[4959]: E0128 15:17:03.279674 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.437686 4959 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.445038 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 22:31:16.301096149 +0000 UTC Jan 28 15:17:03 crc kubenswrapper[4959]: E0128 15:17:03.550204 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="3.2s" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.614005 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.614060 4959 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.614186 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.615172 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.615210 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.615221 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.618657 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.618678 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.619292 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.619316 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.619324 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.621649 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.621679 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.621690 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.621701 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.624982 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 
15:17:03.625019 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.625032 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.625078 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.626180 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.626213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.626223 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.627041 4959 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18" exitCode=0 Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.627090 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18"} Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.627239 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.628164 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.628215 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.628227 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.761640 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.764337 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.764375 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.764385 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:03 crc kubenswrapper[4959]: I0128 15:17:03.764405 4959 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 15:17:03 crc kubenswrapper[4959]: E0128 15:17:03.764825 4959 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 
38.102.83.107:6443: connect: connection refused" node="crc" Jan 28 15:17:03 crc kubenswrapper[4959]: W0128 15:17:03.815276 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.107:6443: connect: connection refused Jan 28 15:17:03 crc kubenswrapper[4959]: E0128 15:17:03.815354 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.107:6443: connect: connection refused" logger="UnhandledError" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.445600 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 17:00:21.614122247 +0000 UTC Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.632931 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74"} Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.633143 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.634418 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.634466 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.634479 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.635361 4959 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6" exitCode=0 Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.635449 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.635469 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.635442 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6"} Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.635566 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.635476 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636022 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636284 4959 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636308 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636321 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636501 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636525 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636535 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636613 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636631 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.636640 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.637648 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.637670 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:04 crc kubenswrapper[4959]: I0128 15:17:04.637678 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.121982 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.446041 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 00:34:26.156151039 +0000 UTC Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.643455 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"06b4ee0080e1c65f8310ac1207a98c59b15714c666839a390da6f8e4555563e3"} Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.643518 4959 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.643558 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.643523 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"748fbba6db4ab0b5e79078734446f3a00090bebae2f4591ba9f1bc17e6bb8a26"} Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.643558 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.643601 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6d0e73acc6693630ed8b0b44cbd4728f5f634d0c8eb2919b4b608c2167905b4b"} Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.644399 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.644426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.644435 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.645139 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.645194 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.645207 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:05 crc kubenswrapper[4959]: I0128 15:17:05.973881 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.447122 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 21:19:57.086284587 +0000 UTC Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.549823 4959 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.648211 4959 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.648247 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.648252 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.648209 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d8e42ad11b473d9e0eb5d2a6c15fa6425b3f0980bc463845723402abef1d616c"} Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.648406 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8bc2b61c48cd69c4512e49fc0ec433dfcffd9a50bd7fe6fa5aac2693a55ac1f2"} Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.649161 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.649185 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.649196 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.649161 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.649343 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.649355 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.965703 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.967172 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.967204 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.967213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:06 crc kubenswrapper[4959]: I0128 15:17:06.967234 4959 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.447738 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 23:16:13.624264728 +0000 UTC Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.496050 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.496264 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.497489 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.497549 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.497565 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.650599 4959 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.650633 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.650638 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.651752 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.651787 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.651797 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.651850 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.651869 4959 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:07 crc kubenswrapper[4959]: I0128 15:17:07.651877 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.099342 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.099607 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.101365 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.101415 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.101435 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.448644 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 06:48:51.934592153 +0000 UTC Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.470336 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.487758 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.653288 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.654442 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.654487 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:08 crc kubenswrapper[4959]: I0128 15:17:08.654501 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.151261 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.151474 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.152492 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.152521 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.152529 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.449503 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline 
is 2025-11-15 12:12:58.606124252 +0000 UTC Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.510826 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.656055 4959 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.656135 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.656076 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.657139 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.657164 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.657174 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.657789 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.657836 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.657864 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.957783 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.957998 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.959299 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.959358 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:09 crc kubenswrapper[4959]: I0128 15:17:09.959372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:10 crc kubenswrapper[4959]: I0128 15:17:10.450078 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 17:49:10.219384359 +0000 UTC Jan 28 15:17:10 crc kubenswrapper[4959]: E0128 15:17:10.657281 4959 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 28 15:17:10 crc kubenswrapper[4959]: I0128 15:17:10.721541 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:10 crc kubenswrapper[4959]: I0128 15:17:10.721803 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:10 crc kubenswrapper[4959]: I0128 15:17:10.723326 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 
28 15:17:10 crc kubenswrapper[4959]: I0128 15:17:10.723372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:10 crc kubenswrapper[4959]: I0128 15:17:10.723385 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:11 crc kubenswrapper[4959]: I0128 15:17:11.099901 4959 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 15:17:11 crc kubenswrapper[4959]: I0128 15:17:11.100012 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 15:17:11 crc kubenswrapper[4959]: I0128 15:17:11.450341 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 12:59:10.384303183 +0000 UTC Jan 28 15:17:12 crc kubenswrapper[4959]: I0128 15:17:12.451068 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 17:07:57.169163629 +0000 UTC Jan 28 15:17:13 crc kubenswrapper[4959]: I0128 15:17:13.452302 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 03:11:13.356242702 +0000 UTC Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.438701 4959 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.452936 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 05:58:44.564285919 +0000 UTC Jan 28 15:17:14 crc kubenswrapper[4959]: W0128 15:17:14.661797 4959 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.661878 4959 trace.go:236] Trace[363430709]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 15:17:04.660) (total time: 10001ms): Jan 28 15:17:14 crc kubenswrapper[4959]: Trace[363430709]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (15:17:14.661) Jan 28 15:17:14 crc kubenswrapper[4959]: Trace[363430709]: [10.001227381s] [10.001227381s] END Jan 28 15:17:14 crc kubenswrapper[4959]: E0128 15:17:14.661902 4959 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.669885 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.675243 4959 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74" exitCode=255 Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.675318 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74"} Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.675519 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.676558 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.676619 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.676633 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.677347 4959 scope.go:117] "RemoveContainer" containerID="9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74" Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.750557 4959 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.750630 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.755569 4959 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 28 15:17:14 crc kubenswrapper[4959]: I0128 15:17:14.755627 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.453869 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 
+0000 UTC, rotation deadline is 2026-01-04 18:37:38.890637682 +0000 UTC Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.681141 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.683713 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e"} Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.683956 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.685548 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.685610 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.685634 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.982658 4959 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]log ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]etcd ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/openshift.io-api-request-count-filter ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/openshift.io-startkubeinformers ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/generic-apiserver-start-informers ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/priority-and-fairness-config-consumer ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/priority-and-fairness-filter ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-apiextensions-informers ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-apiextensions-controllers ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/crd-informer-synced ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-system-namespaces-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-cluster-authentication-info-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-legacy-token-tracking-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: 
[+]poststarthook/start-service-ip-repair-controllers ok Jan 28 15:17:15 crc kubenswrapper[4959]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/priority-and-fairness-config-producer ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/bootstrap-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/start-kube-aggregator-informers ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/apiservice-status-local-available-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/apiservice-status-remote-available-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/apiservice-registration-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/apiservice-wait-for-first-sync ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/apiservice-discovery-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/kube-apiserver-autoregistration ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]autoregister-completion ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/apiservice-openapi-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: [+]poststarthook/apiservice-openapiv3-controller ok Jan 28 15:17:15 crc kubenswrapper[4959]: livez check failed Jan 28 15:17:15 crc kubenswrapper[4959]: I0128 15:17:15.983349 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 15:17:16 crc kubenswrapper[4959]: I0128 15:17:16.455422 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 13:02:00.293052007 +0000 UTC Jan 28 15:17:17 crc kubenswrapper[4959]: I0128 15:17:17.455997 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 18:39:48.233199439 +0000 UTC Jan 28 15:17:18 crc kubenswrapper[4959]: I0128 15:17:18.457019 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 00:17:36.600542086 +0000 UTC Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.174765 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.174932 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.175977 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.176016 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.176029 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.186522 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-etcd/etcd-crc" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.457636 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 10:33:54.349107558 +0000 UTC Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.692865 4959 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.693817 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.693869 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.693889 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:19 crc kubenswrapper[4959]: E0128 15:17:19.747160 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.749657 4959 trace.go:236] Trace[1952530261]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 15:17:09.300) (total time: 10449ms): Jan 28 15:17:19 crc kubenswrapper[4959]: Trace[1952530261]: ---"Objects listed" error: 10449ms (15:17:19.749) Jan 28 15:17:19 crc kubenswrapper[4959]: Trace[1952530261]: [10.449205784s] [10.449205784s] END Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.749698 4959 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.750986 4959 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.751041 4959 trace.go:236] Trace[1240447974]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 15:17:09.323) (total time: 10427ms): Jan 28 15:17:19 crc kubenswrapper[4959]: Trace[1240447974]: ---"Objects listed" error: 10427ms (15:17:19.750) Jan 28 15:17:19 crc kubenswrapper[4959]: Trace[1240447974]: [10.427903943s] [10.427903943s] END Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.751066 4959 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.752064 4959 trace.go:236] Trace[1024865791]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 15:17:04.711) (total time: 15040ms): Jan 28 15:17:19 crc kubenswrapper[4959]: Trace[1024865791]: ---"Objects listed" error: 15040ms (15:17:19.751) Jan 28 15:17:19 crc kubenswrapper[4959]: Trace[1024865791]: [15.040453274s] [15.040453274s] END Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.752094 4959 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 28 15:17:19 crc kubenswrapper[4959]: E0128 15:17:19.753208 4959 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.756854 4959 reflector.go:368] Caches populated for 
*v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 28 15:17:19 crc kubenswrapper[4959]: I0128 15:17:19.957810 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.411138 4959 apiserver.go:52] "Watching apiserver" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.414029 4959 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.414377 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.414819 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.414896 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.414817 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.414994 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.415286 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.415637 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.415703 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.415712 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.415947 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.416392 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.417192 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.417392 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.417499 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.418949 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.420084 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.420098 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.420099 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.420301 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.438992 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.442838 4959 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.452455 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454796 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454838 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454857 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454876 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454895 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454910 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454927 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454943 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454959 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454973 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.454987 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455006 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455021 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455039 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455062 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455077 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455077 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455093 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455127 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455145 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455162 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455179 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455197 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455214 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455229 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455243 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455256 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455286 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455310 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455329 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455344 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455359 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455376 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455404 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455431 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455449 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455469 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455484 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455498 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455513 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455530 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455548 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455562 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455619 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455647 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455664 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455684 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455715 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455743 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455764 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455787 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455812 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455832 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455882 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455912 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455912 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.455935 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456025 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456056 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456086 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456139 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456163 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456190 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456212 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456234 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456267 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456301 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456343 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456367 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456389 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456414 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456446 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456479 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456510 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456546 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456568 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456603 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456637 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456664 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456694 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456727 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456749 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456773 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456797 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456819 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456842 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456864 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456890 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456913 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456935 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456958 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456981 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457182 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457225 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457249 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457274 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457302 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457330 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457797 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457918 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457954 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457986 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458009 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458038 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458064 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458089 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458136 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458162 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458184 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458206 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458231 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458256 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458280 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458302 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458329 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458351 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458377 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458403 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458425 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458450 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458475 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458500 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458566 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458593 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458615 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458643 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458665 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458689 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458716 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458753 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458778 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458804 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458835 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458860 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458885 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458908 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458931 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458953 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458981 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459003 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459027 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459050 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459077 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459134 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459163 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459188 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459212 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459235 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459262 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459281 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459298 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459317 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459334 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459350 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459421 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459442 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459459 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459479 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459502 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459521 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459538 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459558 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459577 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459598 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459616 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459634 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459650 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459669 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459685 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459702 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459723 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459741 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459758 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459776 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459797 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459815 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459831 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459848 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459865 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459896 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459913 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459929 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459947 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459962 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459984 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460017 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460045 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460063 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460081 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460099 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460174 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460195 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460212 4959 reconciler_common.go:159]
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460228 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460244 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460262 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460278 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460295 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460337 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460359 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460380 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460398 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460427 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460445 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460464 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460483 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460508 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460527 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460547 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460567 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " 
pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460591 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460613 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460671 4959 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460683 4959 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460694 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456266 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456301 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456481 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456623 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456863 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456931 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.456988 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457296 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457364 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457488 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457485 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457682 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457705 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457744 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.457885 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458034 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 19:02:44.194698666 +0000 UTC Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.462312 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458313 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458797 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.458759 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459015 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459127 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459282 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459242 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.459618 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460025 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460238 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460554 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460620 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.460718 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461182 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461265 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461312 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461396 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461479 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461509 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461403 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461682 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461774 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461857 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461915 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.461954 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.462414 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.462889 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.462963 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.463013 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.463505 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.463545 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.463604 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.463737 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.463762 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.463883 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.463838 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.464126 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.464227 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.464275 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.464338 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.464408 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). 
InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.464575 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.464815 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465126 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465162 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465233 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465346 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465425 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465883 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465888 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465938 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.465965 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.466060 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.466568 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.466703 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.466663 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.466791 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467075 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467311 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467338 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467401 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467541 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467655 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467676 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467740 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.467880 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.468569 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.468648 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.468663 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.468907 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.468963 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.469199 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.469223 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.469523 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.469560 4959 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.469791 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.469970 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.470018 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.470215 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.470236 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.470286 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.470334 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.470338 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.470474 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.471459 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.471623 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.471972 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.472042 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.472076 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.472360 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.472403 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.472472 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.472562 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.472747 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.472880 4959 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.472965 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:20.972940422 +0000 UTC m=+24.418846885 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.473095 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.473670 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.473857 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.475391 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.476148 4959 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.476259 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:20.976234607 +0000 UTC m=+24.422141070 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.476391 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:17:20.97637878 +0000 UTC m=+24.422285263 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.476839 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.476923 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.477234 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.477214 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.477289 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.477882 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.478001 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.478617 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.479030 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.479094 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). 
InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.479157 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.479453 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.479995 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.480295 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.480418 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.480801 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.481156 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.481311 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.481486 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.481555 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.481666 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.481782 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.482174 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.482497 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.482765 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.482940 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.483043 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.487092 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.488083 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.488284 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.488309 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.488324 4959 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.488448 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:20.988417266 +0000 UTC m=+24.434323649 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.489459 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.489628 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.489937 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.490539 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.490750 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.490784 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.490831 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
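The kube-api-access-s2dwl failure above spells out what a projected service-account volume is assembled from: a bound token plus the kube-root-ca.crt ConfigMap and, on OpenShift, openshift-service-ca.crt. SetUp can only succeed once every source object is available to the kubelet, which is why both ConfigMaps are listed as "not registered". A sketch of such a volume using the k8s.io/api/core/v1 types; the field values are illustrative, not read from this cluster:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// kubeAPIAccessVolume sketches the projected volume behind the
// "kube-api-access-*" mounts in the log: a service-account token projection
// plus CA-bundle ConfigMap projections. The mount fails until every listed
// source object exists, matching the errors above.
func kubeAPIAccessVolume() corev1.Volume {
	expiry := int64(3607) // illustrative token lifetime
	return corev1.Volume{
		Name: "kube-api-access-s2dwl",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						ExpirationSeconds: &expiry,
						Path:              "token",
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
					}},
				},
			},
		},
	}
}

func main() {
	v := kubeAPIAccessVolume()
	fmt.Println(v.Name, "has", len(v.Projected.Sources), "projection sources")
}
```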
InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.491215 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.491280 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.491336 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.491358 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.491523 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.491649 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.491908 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.489953 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.492180 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.492098 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.491205 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.492395 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.492496 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.492682 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.493262 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.493360 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.493384 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.492971 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.493899 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.494140 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.494280 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.494094 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.494511 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.494579 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.494573 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495162 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.494666 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.494883 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495144 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495216 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495246 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). 
InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495293 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495313 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495496 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495490 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.496697 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495438 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.495913 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.496762 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.496772 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.496784 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.496811 4959 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.495808 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.496946 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:20.996906482 +0000 UTC m=+24.442812865 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.496946 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.497503 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.497869 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.497995 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.498704 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.499393 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.501196 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.506503 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.507049 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.516148 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.516604 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.521726 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.523383 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.527965 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.528302 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.536804 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.545920 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561536 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561578 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561634 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561645 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561655 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561663 4959 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561671 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561680 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561688 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561697 4959 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561706 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 
15:17:20.561714 4959 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561721 4959 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561729 4959 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561738 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561747 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561755 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561764 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561772 4959 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561780 4959 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561788 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561797 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561805 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561812 4959 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" 
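The entries above repeat a handful of patterns: UnmountVolume.TearDown successes from operation_generator.go, "Volume detached" bookkeeping from reconciler_common.go, projected-volume failures against ConfigMaps that are "not registered" in the kubelet's object cache, and status-patch failures caused by the pod.network-node-identity.openshift.io webhook refusing connections on 127.0.0.1:9743. A minimal triage sketch for a log like this one, assuming klog-style lines as seen above (the file name kubelet.log and the chosen marker substrings are assumptions for illustration, not part of the log):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	// Substring markers taken from the entries above; extend as needed.
	markers := map[string]string{
		"teardown-ok":     "UnmountVolume.TearDown succeeded",
		"volume-detached": "Volume detached for volume",
		"mount-ok":        "MountVolume.SetUp succeeded",
		"not-registered":  "not registered",
		"webhook-refused": "failed calling webhook",
		"retry-backoff":   "durationBeforeRetry",
	}
	counts := make(map[string]int)

	f, err := os.Open("kubelet.log") // assumed path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	// Status patches (status_manager.go:875) are very long single lines,
	// so raise the scanner's buffer limit above the 64KiB default.
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		line := sc.Text()
		for name, marker := range markers {
			if strings.Contains(line, marker) {
				counts[name]++
			}
		}
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	for name, n := range counts {
		fmt.Printf("%-16s %d\n", name, n)
	}
}

On this stretch of the log, "Volume detached for volume" dominates: it is the reconciler confirming, volume by volume, that teardown for each deleted pod's mounts has completed on node "crc".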
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561823 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561833 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561843 4959 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561853 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561863 4959 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561872 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561881 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561890 4959 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561900 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561909 4959 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561919 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561929 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561942 4959 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc 
kubenswrapper[4959]: I0128 15:17:20.561950 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561958 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561966 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561973 4959 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561981 4959 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561988 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.561997 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562005 4959 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562012 4959 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562020 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562029 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562039 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562050 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc 
kubenswrapper[4959]: I0128 15:17:20.562097 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562131 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562146 4959 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562156 4959 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562166 4959 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562177 4959 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562185 4959 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562192 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562201 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562209 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562217 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562228 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562238 4959 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc 
kubenswrapper[4959]: I0128 15:17:20.562249 4959 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562258 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562270 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562353 4959 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562367 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562378 4959 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562390 4959 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562402 4959 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562413 4959 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562424 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562488 4959 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562499 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562510 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc 
kubenswrapper[4959]: I0128 15:17:20.562560 4959 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562572 4959 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562583 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562641 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562653 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562664 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562674 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562688 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562700 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562710 4959 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562721 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562731 4959 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562743 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc 
kubenswrapper[4959]: I0128 15:17:20.562754 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562765 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562776 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562787 4959 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562780 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562842 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562797 4959 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562893 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562905 4959 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562917 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562928 4959 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562940 4959 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc 
kubenswrapper[4959]: I0128 15:17:20.562955 4959 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562966 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562977 4959 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562987 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.562998 4959 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563009 4959 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563020 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563030 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563041 4959 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563051 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563060 4959 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563069 4959 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563081 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: 
I0128 15:17:20.563091 4959 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563121 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563134 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563145 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563155 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563166 4959 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563176 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563187 4959 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563197 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563207 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563217 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563228 4959 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563239 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563249 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563259 4959 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563270 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563281 4959 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563291 4959 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563311 4959 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563324 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563335 4959 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563345 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563355 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563365 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563377 4959 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563387 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563397 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563418 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563430 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563440 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563450 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563461 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563471 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563483 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563496 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563507 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563520 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563532 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563543 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563554 4959 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563565 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563575 4959 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563585 4959 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563596 4959 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563605 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563615 4959 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563625 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563636 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563646 4959 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563656 4959 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563667 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563682 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563692 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" 
(UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563702 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563712 4959 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563722 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563732 4959 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563742 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563752 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563762 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563772 4959 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563782 4959 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563793 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563805 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563815 4959 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563827 4959 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563838 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563848 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563859 4959 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563870 4959 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563881 4959 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563892 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563936 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563950 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563961 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563972 4959 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563983 4959 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.563994 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.564004 4959 reconciler_common.go:293] "Volume 
detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.564014 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.564025 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.591942 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.592668 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.594462 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.595225 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.596772 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.597389 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.598159 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.598280 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.598280 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.599661 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.600323 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.601011 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.601528 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.602180 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.602760 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.604472 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.604956 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.605847 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.606474 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.606838 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.607727 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.608327 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.608762 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.609343 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.609801 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.610222 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.611189 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.611578 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.612655 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.613296 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.613746 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.615692 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.616611 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.617318 4959 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.617460 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.620488 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.621344 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.621833 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.622285 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.623851 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.624593 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.625513 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.626188 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.627291 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.627731 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.628729 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.629360 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.630304 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.630747 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes"
path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.631778 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.632271 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.633358 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.633804 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.633797 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.634630 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.635074 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.635664 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.636613 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.637094 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.637990 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.643416 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.643958 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.645394 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\
\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.645537 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.655477 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.670112 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.681815 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.693605 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: E0128 15:17:20.703211 4959 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.704323 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.716623 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.728271 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.728327 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.734583 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.739198 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources
\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.742091 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 15:17:20 crc kubenswrapper[4959]: W0128 15:17:20.743955 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-9c81907b6cee7cb6ffde41eb36c419aadb0e692f3c7f21793e0ea653de1c10be WatchSource:0}: Error finding container 9c81907b6cee7cb6ffde41eb36c419aadb0e692f3c7f21793e0ea653de1c10be: Status 404 returned error can't find the container with id 9c81907b6cee7cb6ffde41eb36c419aadb0e692f3c7f21793e0ea653de1c10be Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.751884 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: W0128 15:17:20.762881 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-a181a693b85d431a8899ab4201a08aaad7e0f98a1f07ef3d6209a14263510209 WatchSource:0}: Error finding container a181a693b85d431a8899ab4201a08aaad7e0f98a1f07ef3d6209a14263510209: Status 404 returned error can't find the container with id a181a693b85d431a8899ab4201a08aaad7e0f98a1f07ef3d6209a14263510209 Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.767102 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.978624 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.988874 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:20 crc kubenswrapper[4959]: I0128 15:17:20.998542 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.009592 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.010255 4959 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.018530 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.027924 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.037090 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.045274 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.060654 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.068053 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.068181 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.068226 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: 
\"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.068249 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068296 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:17:22.06826048 +0000 UTC m=+25.514166863 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.068359 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068457 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068479 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068493 4959 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068498 4959 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068546 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:22.068539356 +0000 UTC m=+25.514445739 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068565 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:22.068557727 +0000 UTC m=+25.514464110 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068611 4959 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068694 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:22.068651069 +0000 UTC m=+25.514557452 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068774 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068786 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068793 4959 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.068858 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:22.068851084 +0000 UTC m=+25.514757467 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.462424 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 20:08:47.236885986 +0000 UTC Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.586798 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.586857 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.587001 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:21 crc kubenswrapper[4959]: E0128 15:17:21.587153 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.700972 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27"} Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.701043 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2"} Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.701055 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5034a13297808a114adc27fd57c6e55422a4e593eff15a702ec19e2c473e7d8a"} Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.702937 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed"} Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.703027 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"9c81907b6cee7cb6ffde41eb36c419aadb0e692f3c7f21793e0ea653de1c10be"} Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.704231 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"a181a693b85d431a8899ab4201a08aaad7e0f98a1f07ef3d6209a14263510209"} Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.709703 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.717811 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.730496 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.742825 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.754242 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.767663 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.781554 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.795842 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.809303 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.823024 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.835250 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.848967 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.861220 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.875257 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.887663 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.902980 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:21 crc kubenswrapper[4959]: I0128 15:17:21.921920 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.076764 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.076902 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.076949 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.076976 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:17:24.076954339 +0000 UTC m=+27.522860722 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.077004 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.077030 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077082 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077077 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077099 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077130 4959 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077141 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077146 4959 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077150 4959 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077160 4959 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:22 crc 
kubenswrapper[4959]: E0128 15:17:22.077191 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:24.077177855 +0000 UTC m=+27.523084238 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077213 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:24.077203485 +0000 UTC m=+27.523109868 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077228 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:24.077219406 +0000 UTC m=+27.523125789 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.077239 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:24.077234626 +0000 UTC m=+27.523141009 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.463599 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 04:15:46.58137067 +0000 UTC Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.586187 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:22 crc kubenswrapper[4959]: E0128 15:17:22.586331 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.708046 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503"} Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.727084 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.740822 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.754286 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.769084 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.785937 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.802545 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.815000 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:22 crc kubenswrapper[4959]: I0128 15:17:22.826338 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:23 crc kubenswrapper[4959]: I0128 15:17:23.464435 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 
05:53:03 +0000 UTC, rotation deadline is 2026-01-11 08:05:46.223664971 +0000 UTC
Jan 28 15:17:23 crc kubenswrapper[4959]: I0128 15:17:23.476890 4959 csr.go:261] certificate signing request csr-qdkzs is approved, waiting to be issued
Jan 28 15:17:23 crc kubenswrapper[4959]: I0128 15:17:23.514766 4959 csr.go:257] certificate signing request csr-qdkzs is issued
Jan 28 15:17:23 crc kubenswrapper[4959]: I0128 15:17:23.586322 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:17:23 crc kubenswrapper[4959]: I0128 15:17:23.586393 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:17:23 crc kubenswrapper[4959]: E0128 15:17:23.586461 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:17:23 crc kubenswrapper[4959]: E0128 15:17:23.586609 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.094162 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.094261 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094297 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:17:28.094270908 +0000 UTC m=+31.540177291 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.094321 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.094360 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.094383 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094449 4959 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094517 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:28.094498343 +0000 UTC m=+31.540404726 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094534 4959 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094577 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:28.094566165 +0000 UTC m=+31.540472548 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094643 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094663 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094679 4959 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094681 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094722 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:28.094711708 +0000 UTC m=+31.540618091 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094730 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094748 4959 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.094831 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:28.094802191 +0000 UTC m=+31.540708574 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.464845 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 16:49:08.96904097 +0000 UTC
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.516155 4959 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-28 15:12:23 +0000 UTC, rotation deadline is 2026-10-30 07:38:39.895857983 +0000 UTC
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.516205 4959 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6592h21m15.379657145s for next certificate rotation
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.557641 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-bbjnj"]
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.557977 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.563062 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.563371 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.563504 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.563722 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.567126 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.569749 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-j879q"]
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.570273 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-j879q"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.573082 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-r75mw"]
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.573523 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-r75mw"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.586324 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.586630 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.586384 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.586557 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.587030 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.587208 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.586874 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.587430 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.587644 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:17:24 crc kubenswrapper[4959]: E0128 15:17:24.587782 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.596316 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-b8kbq"]
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.596961 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-b8kbq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.598813 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-etc-kubernetes\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.598928 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-conf-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599003 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-hostroot\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599068 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-cni-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599159 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-cni-multus\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599245 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-netns\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599310 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-cnibin\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599376 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-k8s-cni-cncf-io\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599323 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599448 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-os-release\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599556 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1c1dca0a-c782-43f9-9390-7dc9c5311b97-cni-binary-copy\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599648 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-system-cni-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599716 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-cni-bin\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599790 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk8s8\" (UniqueName: \"kubernetes.io/projected/1c1dca0a-c782-43f9-9390-7dc9c5311b97-kube-api-access-sk8s8\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599865 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-daemon-config\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.599931 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-multus-certs\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.600010 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-socket-dir-parent\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.600081 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-kubelet\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.600309 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.606746 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.649863 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.675449 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.698777 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701033 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-cni-bin\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701085 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk8s8\" (UniqueName: \"kubernetes.io/projected/1c1dca0a-c782-43f9-9390-7dc9c5311b97-kube-api-access-sk8s8\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701143 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-system-cni-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701169 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-daemon-config\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701191 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-multus-certs\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701213 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-socket-dir-parent\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701201 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-cni-bin\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701233 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-kubelet\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701279 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-kubelet\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701339 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-multus-certs\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701364 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cni-binary-copy\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701455 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-conf-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701512 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-conf-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701531 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-etc-kubernetes\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701575 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-system-cni-dir\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701590 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-system-cni-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701601 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-socket-dir-parent\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701642 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-hostroot\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701604 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-etc-kubernetes\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701707 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-hostroot\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701669 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f22b9702-cd33-405b-9cea-babf675908f5-rootfs\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701764 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f22b9702-cd33-405b-9cea-babf675908f5-mcd-auth-proxy-config\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701821 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701838 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-os-release\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701903 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a0238833-fbe2-4738-8079-14957d0506f7-hosts-file\") pod \"node-resolver-j879q\" (UID: \"a0238833-fbe2-4738-8079-14957d0506f7\") " pod="openshift-dns/node-resolver-j879q"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701927 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsz26\" (UniqueName: \"kubernetes.io/projected/f22b9702-cd33-405b-9cea-babf675908f5-kube-api-access-jsz26\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701943 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-tuning-conf-dir\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701959 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-cni-multus\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701977 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cnibin\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.701996 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8lnq\" (UniqueName: \"kubernetes.io/projected/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-kube-api-access-w8lnq\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702038 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-var-lib-cni-multus\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702063 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-cni-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702095 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-cnibin\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702135 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-k8s-cni-cncf-io\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702158 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-netns\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702185 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-cnibin\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702212 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-k8s-cni-cncf-io\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702262 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-cni-dir\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702278 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-host-run-netns\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702265 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f22b9702-cd33-405b-9cea-babf675908f5-proxy-tls\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702322 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-os-release\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702337 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1c1dca0a-c782-43f9-9390-7dc9c5311b97-cni-binary-copy\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702360 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7p42\" (UniqueName: \"kubernetes.io/projected/a0238833-fbe2-4738-8079-14957d0506f7-kube-api-access-s7p42\") pod \"node-resolver-j879q\" (UID: \"a0238833-fbe2-4738-8079-14957d0506f7\") " pod="openshift-dns/node-resolver-j879q"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702519 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/1c1dca0a-c782-43f9-9390-7dc9c5311b97-multus-daemon-config\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702713 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1c1dca0a-c782-43f9-9390-7dc9c5311b97-os-release\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.702876 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1c1dca0a-c782-43f9-9390-7dc9c5311b97-cni-binary-copy\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.719604 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.724328 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk8s8\" (UniqueName: \"kubernetes.io/projected/1c1dca0a-c782-43f9-9390-7dc9c5311b97-kube-api-access-sk8s8\") pod \"multus-bbjnj\" (UID: \"1c1dca0a-c782-43f9-9390-7dc9c5311b97\") " pod="openshift-multus/multus-bbjnj"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.734941 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.746498 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.758348 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.770663 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.782323 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.798161 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803622 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f22b9702-cd33-405b-9cea-babf675908f5-rootfs\") pod \"machine-config-daemon-r75mw\" (UID: 
\"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803684 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f22b9702-cd33-405b-9cea-babf675908f5-mcd-auth-proxy-config\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803714 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803758 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-os-release\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803793 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a0238833-fbe2-4738-8079-14957d0506f7-hosts-file\") pod \"node-resolver-j879q\" (UID: \"a0238833-fbe2-4738-8079-14957d0506f7\") " pod="openshift-dns/node-resolver-j879q" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803818 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsz26\" (UniqueName: \"kubernetes.io/projected/f22b9702-cd33-405b-9cea-babf675908f5-kube-api-access-jsz26\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803824 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f22b9702-cd33-405b-9cea-babf675908f5-rootfs\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803926 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/a0238833-fbe2-4738-8079-14957d0506f7-hosts-file\") pod \"node-resolver-j879q\" (UID: \"a0238833-fbe2-4738-8079-14957d0506f7\") " pod="openshift-dns/node-resolver-j879q" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803933 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-os-release\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.803842 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-tuning-conf-dir\") 
pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804004 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8lnq\" (UniqueName: \"kubernetes.io/projected/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-kube-api-access-w8lnq\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804053 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cnibin\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804075 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f22b9702-cd33-405b-9cea-babf675908f5-proxy-tls\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804101 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7p42\" (UniqueName: \"kubernetes.io/projected/a0238833-fbe2-4738-8079-14957d0506f7-kube-api-access-s7p42\") pod \"node-resolver-j879q\" (UID: \"a0238833-fbe2-4738-8079-14957d0506f7\") " pod="openshift-dns/node-resolver-j879q" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804176 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-system-cni-dir\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804200 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cni-binary-copy\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804316 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-system-cni-dir\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804501 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cnibin\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804516 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-tuning-conf-dir\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804629 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f22b9702-cd33-405b-9cea-babf675908f5-mcd-auth-proxy-config\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804694 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.804778 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-cni-binary-copy\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.807725 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f22b9702-cd33-405b-9cea-babf675908f5-proxy-tls\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.811742 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.819925 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsz26\" (UniqueName: \"kubernetes.io/projected/f22b9702-cd33-405b-9cea-babf675908f5-kube-api-access-jsz26\") pod \"machine-config-daemon-r75mw\" (UID: \"f22b9702-cd33-405b-9cea-babf675908f5\") " pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.822844 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8lnq\" (UniqueName: \"kubernetes.io/projected/7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca-kube-api-access-w8lnq\") pod \"multus-additional-cni-plugins-b8kbq\" (UID: \"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\") " pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.824261 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.828177 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7p42\" (UniqueName: \"kubernetes.io/projected/a0238833-fbe2-4738-8079-14957d0506f7-kube-api-access-s7p42\") pod \"node-resolver-j879q\" (UID: \"a0238833-fbe2-4738-8079-14957d0506f7\") " pod="openshift-dns/node-resolver-j879q" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.836426 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.853475 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.867569 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.870755 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-bbjnj" Jan 28 15:17:24 crc kubenswrapper[4959]: W0128 15:17:24.882183 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c1dca0a_c782_43f9_9390_7dc9c5311b97.slice/crio-0b07a328a50bae46eafefcaad812967a0230d144604f4e950f7c8b66f9b0293f WatchSource:0}: Error finding container 0b07a328a50bae46eafefcaad812967a0230d144604f4e950f7c8b66f9b0293f: Status 404 returned error can't find the container with id 0b07a328a50bae46eafefcaad812967a0230d144604f4e950f7c8b66f9b0293f Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.883146 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.884304 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-j879q" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.894328 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.900857 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: W0128 15:17:24.901558 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0238833_fbe2_4738_8079_14957d0506f7.slice/crio-08f537c2e510a707f0679db1bf7ed07db38be54c575201d445b2b4f9ebaa93b4 WatchSource:0}: Error finding container 08f537c2e510a707f0679db1bf7ed07db38be54c575201d445b2b4f9ebaa93b4: Status 404 returned error can't find the container with id 
08f537c2e510a707f0679db1bf7ed07db38be54c575201d445b2b4f9ebaa93b4 Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.908902 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.921535 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.937027 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.954192 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.975523 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mvzjl"] Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.976928 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.980195 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.981068 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.981170 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.981274 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.981382 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.983612 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.983956 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 28 15:17:24 crc kubenswrapper[4959]: I0128 15:17:24.996648 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:24Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.017793 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.031146 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.046678 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.058022 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.076090 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.091996 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.105259 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.106997 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovn-node-metrics-cert\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.107041 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-netd\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.107061 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-netns\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.107562 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-script-lib\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.107820 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-systemd\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.107854 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-kubelet\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.107910 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-ovn-kubernetes\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.107995 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-etc-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108048 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-ovn\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108069 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-node-log\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108131 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-config\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108159 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-systemd-units\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108227 4959 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5t2sz\" (UniqueName: \"kubernetes.io/projected/1bad991a-9aad-4e7b-abdd-7d23124f60a8-kube-api-access-5t2sz\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108250 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-var-lib-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108294 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-log-socket\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108320 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108399 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-slash\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108438 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108460 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-env-overrides\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.108551 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-bin\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.118080 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.137371 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.151903 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resou
rces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.165361 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.178569 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.210705 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.210587 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211063 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-env-overrides\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211091 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-slash\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211153 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-bin\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211240 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovn-node-metrics-cert\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211271 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-netd\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211271 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-bin\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc 
kubenswrapper[4959]: I0128 15:17:25.211292 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-netns\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211254 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-slash\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211313 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-script-lib\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211439 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-kubelet\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211460 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-systemd\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211463 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-netns\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211480 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-ovn-kubernetes\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211507 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-kubelet\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211508 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-ovn-kubernetes\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211442 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-netd\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211530 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-systemd\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211572 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-ovn\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211587 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-node-log\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211607 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-etc-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211622 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-systemd-units\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211630 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-ovn\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211635 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-config\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211685 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-var-lib-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211707 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-log-socket\") pod \"ovnkube-node-mvzjl\" (UID: 
\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211728 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5t2sz\" (UniqueName: \"kubernetes.io/projected/1bad991a-9aad-4e7b-abdd-7d23124f60a8-kube-api-access-5t2sz\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211748 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211827 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211856 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-var-lib-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211882 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-log-socket\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.211982 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-env-overrides\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.212027 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-script-lib\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.212052 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-node-log\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.212030 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-etc-openvswitch\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: 
I0128 15:17:25.212089 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-systemd-units\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.212125 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-config\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.215786 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovn-node-metrics-cert\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.229245 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5t2sz\" (UniqueName: \"kubernetes.io/projected/1bad991a-9aad-4e7b-abdd-7d23124f60a8-kube-api-access-5t2sz\") pod \"ovnkube-node-mvzjl\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.310143 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:25 crc kubenswrapper[4959]: W0128 15:17:25.321760 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bad991a_9aad_4e7b_abdd_7d23124f60a8.slice/crio-c456faa3ab9a0ef3260a9e81acb4cc41cdb2f4be9096ea70b82eb409a019e4b2 WatchSource:0}: Error finding container c456faa3ab9a0ef3260a9e81acb4cc41cdb2f4be9096ea70b82eb409a019e4b2: Status 404 returned error can't find the container with id c456faa3ab9a0ef3260a9e81acb4cc41cdb2f4be9096ea70b82eb409a019e4b2 Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.465659 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 16:39:38.81093985 +0000 UTC Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.586837 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.586907 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:25 crc kubenswrapper[4959]: E0128 15:17:25.586990 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:25 crc kubenswrapper[4959]: E0128 15:17:25.587071 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.717615 4959 generic.go:334] "Generic (PLEG): container finished" podID="7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca" containerID="8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6" exitCode=0 Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.717684 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" event={"ID":"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca","Type":"ContainerDied","Data":"8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.717720 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" event={"ID":"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca","Type":"ContainerStarted","Data":"4d6701c444aefeaf962ec4f752abbd6f52b604f77a10a7780717d2ca1bbf0d7f"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.718983 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bbjnj" event={"ID":"1c1dca0a-c782-43f9-9390-7dc9c5311b97","Type":"ContainerStarted","Data":"17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.719007 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bbjnj" event={"ID":"1c1dca0a-c782-43f9-9390-7dc9c5311b97","Type":"ContainerStarted","Data":"0b07a328a50bae46eafefcaad812967a0230d144604f4e950f7c8b66f9b0293f"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.720831 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf" exitCode=0 Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.720911 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.720960 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"c456faa3ab9a0ef3260a9e81acb4cc41cdb2f4be9096ea70b82eb409a019e4b2"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.723022 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.723066 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" 
event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.723079 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"88d716c39652de2ec05bf93883dfd8c4d5c67ab38bcfee1bdbc33cde58c99c2e"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.724624 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-j879q" event={"ID":"a0238833-fbe2-4738-8079-14957d0506f7","Type":"ContainerStarted","Data":"15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.724658 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-j879q" event={"ID":"a0238833-fbe2-4738-8079-14957d0506f7","Type":"ContainerStarted","Data":"08f537c2e510a707f0679db1bf7ed07db38be54c575201d445b2b4f9ebaa93b4"} Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.728649 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: 
Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.748876 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\
"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\
\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.762576 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.779056 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.795144 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.809751 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.824472 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.837633 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.851677 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.864405 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.883832 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.898340 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.912546 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.932777 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.946931 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.960912 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.976834 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:25 crc kubenswrapper[4959]: I0128 15:17:25.991816 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:25Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc 
kubenswrapper[4959]: I0128 15:17:26.004999 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.018396 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.032007 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.053705 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z 
is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.066806 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.083885 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.101269 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.119364 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.153768 4959 kubelet_node_status.go:401] "Setting 
node annotation to enable volume controller attach/detach" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.156037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.156076 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.156089 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.156258 4959 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.167990 4959 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.168302 4959 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.169540 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.169583 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.169594 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.169610 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.169622 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: E0128 15:17:26.190308 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.194146 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.194190 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.194203 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.194225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.194238 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: E0128 15:17:26.205619 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.209102 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.209192 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.209211 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.209235 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.209252 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: E0128 15:17:26.220589 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.227535 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.227584 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.227598 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.227618 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.227632 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: E0128 15:17:26.239776 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.243338 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.243367 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.243406 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.243422 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.243431 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: E0128 15:17:26.254227 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: E0128 15:17:26.254389 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.256717 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.256765 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.256778 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.256800 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.256819 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.359970 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.360005 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.360015 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.360033 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.360044 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.462893 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.463389 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.463406 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.463424 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.463437 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.466485 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 12:34:34.819566865 +0000 UTC Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.565900 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.565952 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.565964 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.565981 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.565993 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.587189 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:26 crc kubenswrapper[4959]: E0128 15:17:26.587368 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.668205 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.668248 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.668259 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.668278 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.668288 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.736568 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.736627 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.736641 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.736650 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.739341 4959 generic.go:334] "Generic (PLEG): container finished" podID="7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca" containerID="3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26" exitCode=0 Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.739422 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" event={"ID":"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca","Type":"ContainerDied","Data":"3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.754754 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.768933 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.771185 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.771565 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.771620 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.771644 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.771655 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.783110 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.800265 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.822026 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\
\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.835499 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.849506 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.864491 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.875043 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.875084 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.875093 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.875109 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.875123 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.881305 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.895285 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.917040 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.931745 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.944099 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:26Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.978111 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.978163 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.978171 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.978185 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:26 crc kubenswrapper[4959]: I0128 15:17:26.978195 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:26Z","lastTransitionTime":"2026-01-28T15:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.080420 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.080469 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.080479 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.080500 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.080514 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.182999 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.183041 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.183051 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.183068 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.183079 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.229006 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-xg4vp"] Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.229495 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.231156 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.231221 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.232166 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.232209 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.243634 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.257262 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.268698 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.280906 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.285358 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.285396 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.285404 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.285421 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.285432 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.293703 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.303534 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.317877 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.329683 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.330308 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6m6k\" (UniqueName: \"kubernetes.io/projected/9d40d4ce-1825-487f-a8d8-7c1c68811757-kube-api-access-r6m6k\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.330374 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/9d40d4ce-1825-487f-a8d8-7c1c68811757-serviceca\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.330406 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9d40d4ce-1825-487f-a8d8-7c1c68811757-host\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.341561 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.353990 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.373719 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.382606 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.387815 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.387860 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.387873 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.387891 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.387906 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.395161 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.408019 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.431786 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/9d40d4ce-1825-487f-a8d8-7c1c68811757-serviceca\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.431856 4959 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9d40d4ce-1825-487f-a8d8-7c1c68811757-host\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.431889 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6m6k\" (UniqueName: \"kubernetes.io/projected/9d40d4ce-1825-487f-a8d8-7c1c68811757-kube-api-access-r6m6k\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.431921 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9d40d4ce-1825-487f-a8d8-7c1c68811757-host\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.433101 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/9d40d4ce-1825-487f-a8d8-7c1c68811757-serviceca\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.450362 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6m6k\" (UniqueName: \"kubernetes.io/projected/9d40d4ce-1825-487f-a8d8-7c1c68811757-kube-api-access-r6m6k\") pod \"node-ca-xg4vp\" (UID: \"9d40d4ce-1825-487f-a8d8-7c1c68811757\") " pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.467116 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 14:43:19.650950355 +0000 UTC Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.490346 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.490399 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.490413 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.490429 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.490440 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.543021 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-xg4vp" Jan 28 15:17:27 crc kubenswrapper[4959]: W0128 15:17:27.554406 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d40d4ce_1825_487f_a8d8_7c1c68811757.slice/crio-4e7e0b803ee8060813a9fb819b237b9738bf18b1f455872ff655d5822a24c6e8 WatchSource:0}: Error finding container 4e7e0b803ee8060813a9fb819b237b9738bf18b1f455872ff655d5822a24c6e8: Status 404 returned error can't find the container with id 4e7e0b803ee8060813a9fb819b237b9738bf18b1f455872ff655d5822a24c6e8 Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.586454 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.586462 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:27 crc kubenswrapper[4959]: E0128 15:17:27.586988 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:27 crc kubenswrapper[4959]: E0128 15:17:27.587002 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.595071 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.595139 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.595153 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.595173 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.595186 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
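
Each setters.go entry above carries the same condition object. Rebuilt as a standalone Go sketch (a local stand-in for the upstream k8s.io/api type, carrying only the fields visible in this log), it marshals back to the logged JSON:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// NodeCondition mirrors the JSON shape in the setters.go lines; it is not
// the real k8s.io/api/core/v1 type.
type NodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	ts := time.Date(2026, 1, 28, 15, 17, 27, 0, time.UTC) // timestamp from the log
	c := NodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  ts,
		LastTransitionTime: ts,
		Reason:             "KubeletNotReady",
		Message:            "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?",
	}
	out, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
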
Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.698591 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.698649 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.698665 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.698684 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.698696 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.745389 4959 generic.go:334] "Generic (PLEG): container finished" podID="7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca" containerID="ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b" exitCode=0 Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.745494 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" event={"ID":"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca","Type":"ContainerDied","Data":"ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.746885 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-xg4vp" event={"ID":"9d40d4ce-1825-487f-a8d8-7c1c68811757","Type":"ContainerStarted","Data":"4e7e0b803ee8060813a9fb819b237b9738bf18b1f455872ff655d5822a24c6e8"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.752839 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.752890 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.759998 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.773332 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.786088 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.801965 4959 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.802012 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.802021 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.802037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.802047 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.807213 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z 
is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.818126 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.834854 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
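
Every failed patch in this log ends the same way: Go's TLS client rejects the webhook's serving certificate because the current time (2026-01-28) falls after its NotAfter date (2025-08-24). A minimal sketch of that validity-window check, assuming the certificate is readable at the webhook-cert mount path listed earlier (the directory comes from the pod spec above; the tls.crt file name and reading it directly are purely illustrative):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path under the webhook-cert volume mount seen above.
	data, err := os.ReadFile("/etc/webhook-cert/tls.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	fmt.Printf("NotBefore=%s NotAfter=%s now=%s\n", cert.NotBefore, cert.NotAfter, now)
	switch {
	case now.After(cert.NotAfter):
		fmt.Println("certificate has expired") // the failure mode in this log
	case now.Before(cert.NotBefore):
		fmt.Println("certificate is not yet valid")
	default:
		fmt.Println("certificate is within its validity window")
	}
}

Because the verification happens in the TLS handshake, the webhook is never reached at all, so every status patch fails identically until the certificate is reissued.
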
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.849273 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.862892 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.876460 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.888279 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.920952 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.928393 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.928434 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.928443 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.928460 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.928470 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:27Z","lastTransitionTime":"2026-01-28T15:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.945448 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.958745 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:27 crc kubenswrapper[4959]: I0128 15:17:27.975382 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.031741 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.031784 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.031803 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.031824 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.031843 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.134170 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.134221 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.134236 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.134257 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.134271 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.138523 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.138637 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.138669 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.138705 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138795 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:17:36.138758424 +0000 UTC m=+39.584664837 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138829 4959 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138877 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:36.138862517 +0000 UTC m=+39.584768900 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138801 4959 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138884 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.138907 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138922 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138984 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138993 4959 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138998 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.139019 4959 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod 
openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.138908 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:36.138902408 +0000 UTC m=+39.584808791 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.139080 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:36.139065802 +0000 UTC m=+39.584972225 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.139115 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:36.139096423 +0000 UTC m=+39.585002836 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.236963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.237010 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.237021 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.237038 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.237050 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.340024 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.340175 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.340206 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.340242 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.340268 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.443695 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.443739 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.443748 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.443767 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.443781 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.468191 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 01:32:20.251212002 +0000 UTC Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.546166 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.546219 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.546228 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.546244 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.546254 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.586867 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:28 crc kubenswrapper[4959]: E0128 15:17:28.587085 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.648056 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.648094 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.648101 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.648117 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.648125 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.750808 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.750865 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.750881 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.750907 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.750925 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.758706 4959 generic.go:334] "Generic (PLEG): container finished" podID="7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca" containerID="e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e" exitCode=0 Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.758811 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" event={"ID":"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca","Type":"ContainerDied","Data":"e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.760754 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-xg4vp" event={"ID":"9d40d4ce-1825-487f-a8d8-7c1c68811757","Type":"ContainerStarted","Data":"a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.782330 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.799512 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.821961 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.836138 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.849603 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.854078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.854128 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.854164 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.854185 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.854198 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.869588 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.891708 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.911166 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.926651 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.940929 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.955684 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.957465 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.957502 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.957516 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.957535 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.957548 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:28Z","lastTransitionTime":"2026-01-28T15:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.972770 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:28 crc kubenswrapper[4959]: I0128 15:17:28.991229 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.004773 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.024650 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 
15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.039070 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.051533 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.060311 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.060342 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.060354 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.060371 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.060382 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.066433 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.078902 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.093608 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.106691 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.119839 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.130542 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.145180 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.162373 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.162417 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.162428 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.162445 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.162456 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.164192 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.177175 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.190244 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.210324 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.265442 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.265483 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.265493 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.265511 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.265523 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.367975 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.368024 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.368035 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.368054 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.368068 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.469091 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 07:20:06.422813592 +0000 UTC
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.471158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.471208 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.471218 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.471236 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.471247 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.573599 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.573649 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.573657 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.573673 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.573685 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.587130 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.587161 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:17:29 crc kubenswrapper[4959]: E0128 15:17:29.587294 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:17:29 crc kubenswrapper[4959]: E0128 15:17:29.587540 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.676053 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.676092 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.676100 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.676129 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.676140 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.768760 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"}
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.771867 4959 generic.go:334] "Generic (PLEG): container finished" podID="7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca" containerID="fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d" exitCode=0
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.771963 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" event={"ID":"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca","Type":"ContainerDied","Data":"fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d"}
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.777777 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.777807 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.777817 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.777833 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.777848 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.791225 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.810608 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.826677 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.843159 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.863335 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.879692 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.879735 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.879747 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.879766 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.879777 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.883321 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.901850 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.921735 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.935638 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.948085 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.961983 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.962712 4959 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.980705 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":f
alse,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\
":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2445
6da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.981538 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.981566 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.981579 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.981596 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.981610 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:29Z","lastTransitionTime":"2026-01-28T15:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:29 crc kubenswrapper[4959]: I0128 15:17:29.993841 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:29Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.009904 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.025741 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.037483 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.054510 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.068987 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.083439 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.084359 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.084393 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.084430 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.084452 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.084465 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.100145 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.117627 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z 
is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.128327 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.143339 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.145768 4959 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 28 15:17:30 crc kubenswrapper[4959]: E0128 15:17:30.163774 4959 request.go:1255] Unexpected error when reading response body: read tcp 38.102.83.107:48724->38.102.83.107:6443: use of closed network connection Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.163869 4959 status_manager.go:851] "Failed to get status for pod" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="unexpected error when reading response body. Please retry. 
Original error: read tcp 38.102.83.107:48724->38.102.83.107:6443: use of closed network connection" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.174729 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.184555 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.186271 4959 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.186306 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.186316 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.186329 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.186339 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.197994 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.208593 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.289501 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.289553 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.289565 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.289583 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.289596 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.392541 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.392586 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.392595 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.392611 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.392622 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.469893 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 01:49:42.986991623 +0000 UTC Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.494867 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.494905 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.494917 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.494937 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.494948 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.586789 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:30 crc kubenswrapper[4959]: E0128 15:17:30.586936 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.597526 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.597578 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.597589 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.597610 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.597624 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.602468 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee
1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.615432 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.630823 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.647041 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.666131 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.676570 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.690160 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID
\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.699000 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.699037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.699047 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.699064 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.699076 4959 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.700916 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.712474 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.723149 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.734185 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.745723 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.755812 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.769609 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.782001 4959 generic.go:334] "Generic (PLEG): container finished" podID="7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca" containerID="156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed" exitCode=0 Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.782065 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" event={"ID":"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca","Type":"ContainerDied","Data":"156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.795517 4959 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.801971 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.802018 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.802034 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.802053 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.802065 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.809559 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.827642 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.843038 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when 
the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.860942 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.875065 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.894328 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin
\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.905556 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.907075 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.907152 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.907163 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.907180 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:30 crc 
kubenswrapper[4959]: I0128 15:17:30.907191 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:30Z","lastTransitionTime":"2026-01-28T15:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.918624 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-man
ager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.931242 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.944290 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.958262 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.977556 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647
e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:30 crc kubenswrapper[4959]: I0128 15:17:30.990279 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.009901 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.009938 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.009951 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.009967 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.009979 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.112260 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.112297 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.112308 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.112324 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.112335 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.214803 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.214842 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.214851 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.214865 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.214877 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.316552 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.316590 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.316601 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.316616 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.316630 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.422075 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.422147 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.422160 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.422176 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.422187 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.470559 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 22:09:01.505213451 +0000 UTC Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.523962 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.523992 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.524000 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.524014 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.524023 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.586460 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.586479 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:31 crc kubenswrapper[4959]: E0128 15:17:31.586633 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:31 crc kubenswrapper[4959]: E0128 15:17:31.586739 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.627072 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.627109 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.627117 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.627152 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.627167 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.729809 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.729846 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.729854 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.729869 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.729879 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.788126 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" event={"ID":"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca","Type":"ContainerStarted","Data":"58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.793170 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.793478 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.806782 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.817066 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.822744 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.832391 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.832442 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.832466 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.832480 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.832499 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.837231 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.852695 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.865360 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.876772 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-l
ib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.886490 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\
"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.899996 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/c
ni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeov
erride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.913597 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.926913 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.936668 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.937414 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.937427 4959 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.937445 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.937455 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:31Z","lastTransitionTime":"2026-01-28T15:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.941516 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.954454 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.974939 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:31 crc kubenswrapper[4959]: I0128 15:17:31.986973 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:31Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.003381 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\
\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.018455 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.029851 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.040857 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.040908 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.040918 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.040936 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.040947 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.043827 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.063209 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/sec
rets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\
",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\
\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.073594 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.088575 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.102872 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.116636 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.129799 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.146960 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.147006 4959 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.147020 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.147036 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.147047 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.165991 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.186694 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.207614 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.223094 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\
\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\
\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.253144 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.253215 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.253229 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.253250 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.253263 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.356546 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.356917 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.356931 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.356947 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.356959 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.459896 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.459945 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.459957 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.459977 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.459990 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.471279 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 02:31:10.121174515 +0000 UTC Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.562097 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.562156 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.562166 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.562184 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.562195 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.586804 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:32 crc kubenswrapper[4959]: E0128 15:17:32.586941 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
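The NodeNotReady churn above reduces to a single condition: the container runtime reports NetworkReady=false until at least one CNI network configuration exists in /etc/kubernetes/cni/net.d/. Below is a minimal sketch of that readiness test, not the kubelet's or CRI-O's actual code path; the accepted extensions follow CNI's libcni conventions, and the directory is the one named in the log.

// cnicheck.go: sketch of the readiness test implied by the repeated
// "NetworkReady=false ... no CNI configuration file" messages above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether confDir holds any CNI network configuration.
// The patterns mirror what CNI's libcni accepts (.conf, .conflist, .json).
func hasCNIConfig(confDir string) (bool, error) {
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err != nil {
			return false, err
		}
		if len(matches) > 0 {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	ok, err := hasCNIConfig(confDir)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if !ok {
		// This is the state behind the node flapping to NotReady above.
		fmt.Printf("no CNI configuration file in %s\n", confDir)
		return
	}
	fmt.Println("CNI configuration present; network plugin can initialize")
}

Once the ovnkube-controller container that started at 15:17:31 finishes initializing and a network configuration lands in that directory, this check passes and the node's Ready condition should clear.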
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.665072 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.665123 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.665134 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.665148 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.665158 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.768552 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.768600 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.768613 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.768632 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.768646 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.797190 4959 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.797583 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.871059 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.871096 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.871107 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.871141 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.871151 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.873797 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.893526 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb
3eea46ca2505900374c64c03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.904815 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.917506 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.935263 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.948767 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.960092 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 
15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.972530 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.974002 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.974043 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.974053 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.974069 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.974079 4959 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:32Z","lastTransitionTime":"2026-01-28T15:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:32 crc kubenswrapper[4959]: I0128 15:17:32.985412 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.000959 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:32Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.012581 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:33Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.032878 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mount
Path\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b
5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"s
tartTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:33Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.056368 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:33Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.074602 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:33Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.078412 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.078458 4959 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.078472 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.078493 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.078508 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.090665 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:33Z is after 2025-08-24T17:21:41Z"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.181049 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.181096 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.181121 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.181166 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.181181 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.284298 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.284339 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.284349 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.284365 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.284376 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.387550 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.387613 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.387624 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.387644 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.387655 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.472084 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 20:37:25.863762315 +0000 UTC
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.490645 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.490690 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.490703 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.490721 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.490736 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.588400 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.588727 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:17:33 crc kubenswrapper[4959]: E0128 15:17:33.589705 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:17:33 crc kubenswrapper[4959]: E0128 15:17:33.589958 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.593451 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.593476 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.593484 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.593499 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.593509 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.696448 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.696504 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.696519 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.696543 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.696561 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.800802 4959 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.801615 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.801671 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.801684 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.801738 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.801758 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.905431 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.905476 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.905485 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.905501 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:33 crc kubenswrapper[4959]: I0128 15:17:33.905513 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:33Z","lastTransitionTime":"2026-01-28T15:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.007495 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.007540 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.007550 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.007566 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.007577 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.109877 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.109927 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.109937 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.109955 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.109967 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.213076 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.213144 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.213158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.213178 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.213192 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.315755 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.315797 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.315805 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.315821 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.315832 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.417979 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.418020 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.418031 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.418048 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.418057 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.472669 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 05:24:07.532481601 +0000 UTC
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.520723 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.521177 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.521309 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.521427 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.521515 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.587141 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:17:34 crc kubenswrapper[4959]: E0128 15:17:34.587311 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.624265 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.624304 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.624315 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.624329 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.624340 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.726815 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.726857 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.726866 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.726884 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.726896 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.803730 4959 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.829567 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.829605 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.829613 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.829630 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.829640 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.932017 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.932132 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.932143 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.932159 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:34 crc kubenswrapper[4959]: I0128 15:17:34.932169 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:34Z","lastTransitionTime":"2026-01-28T15:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.035180 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.035223 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.035235 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.035255 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.035267 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.137362 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.137404 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.137415 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.137433 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.137444 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.240425 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.240470 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.240480 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.240497 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.240514 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.342382 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.342427 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.342438 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.342452 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.342462 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.444885 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.445043 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.445060 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.445088 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.445132 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.473268 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 23:40:03.141560639 +0000 UTC Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.547669 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.547714 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.547723 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.547739 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.547754 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.586788 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.586797 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:35 crc kubenswrapper[4959]: E0128 15:17:35.586957 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:35 crc kubenswrapper[4959]: E0128 15:17:35.587057 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.650343 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.650388 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.650407 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.650426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.650440 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.753665 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.753701 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.753710 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.753724 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.753735 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.807936 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/0.log" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.809831 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03" exitCode=1 Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.809874 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.810533 4959 scope.go:117] "RemoveContainer" containerID="fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.824762 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.837487 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.850998 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.857941 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.858019 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.858037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.858061 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.858078 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.866889 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.879768 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.895275 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.908719 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.920305 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.932386 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.953143 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:34Z\\\",\\\"message\\\":\\\"ice/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 15:17:34.781386 6305 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 15:17:34.781424 6305 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 15:17:34.781443 6305 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 15:17:34.781481 6305 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 15:17:34.781480 6305 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 15:17:34.781510 6305 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 15:17:34.781519 6305 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 15:17:34.781530 6305 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 15:17:34.781552 6305 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 15:17:34.781563 6305 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 15:17:34.781560 6305 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 15:17:34.781580 6305 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 15:17:34.781584 6305 factory.go:656] Stopping watch factory\\\\nI0128 15:17:34.781593 6305 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 15:17:34.781600 6305 ovnkube.go:599] Stopped ovnkube\\\\nI0128 
15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.961068 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.961144 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.961158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.961177 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.961191 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:35Z","lastTransitionTime":"2026-01-28T15:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.962895 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.975963 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:35 crc kubenswrapper[4959]: I0128 15:17:35.990401 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:35Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.003460 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.063951 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.063981 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.063990 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.064003 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.064013 4959 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.167525 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.167846 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.167860 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.167880 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.167892 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.225476 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.225571 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.225595 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225694 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:17:52.225653884 +0000 UTC m=+55.671560297 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225727 4959 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225708 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.225780 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225789 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225853 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225867 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225871 4959 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225837 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:52.225793728 +0000 UTC m=+55.671700101 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.225898 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225940 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:52.225925681 +0000 UTC m=+55.671832104 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225878 4959 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225999 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:52.225989823 +0000 UTC m=+55.671896346 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.225944 4959 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.226040 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:52.226033845 +0000 UTC m=+55.671940228 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.270738 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.270782 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.270793 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.270811 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.270823 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.373856 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.373916 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.373934 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.373960 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.373979 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.473888 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 08:21:14.580602645 +0000 UTC Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.476713 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.476757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.476768 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.476784 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.476794 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.563306 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.563343 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.563351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.563364 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.563375 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.574795 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.578220 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.578267 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.578285 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.578307 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.578325 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.586568 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.586691 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.591751 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload identical to the preceding attempt elided ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.595449 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.595485 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.595493 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.595508 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.595521 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.607391 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload identical to the preceding attempt elided ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.611674 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.611735 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.611755 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.611781 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.611801 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.626501 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload identical to the preceding attempt elided ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.630066 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.630153 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.630166 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.630184 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.630197 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.645832 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload identical to the preceding attempt elided ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: E0128 15:17:36.645993 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.647802 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.647855 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.647865 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.647884 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.647897 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.750130 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.750173 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.750182 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.750202 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.750215 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.808274 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj"] Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.808713 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.810954 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.812267 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.824600 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name
\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.835347 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is 
after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.850634 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"i
mage\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath
\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.851958 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.851986 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.851996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.852010 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.852019 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.862154 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.874053 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.891552 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.907448 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.926596 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:34Z\\\",\\\"message\\\":\\\"ice/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 15:17:34.781386 6305 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 15:17:34.781424 6305 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 15:17:34.781443 6305 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 15:17:34.781481 6305 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 15:17:34.781480 6305 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 15:17:34.781510 6305 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 15:17:34.781519 6305 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 15:17:34.781530 6305 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 15:17:34.781552 6305 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 15:17:34.781563 6305 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 15:17:34.781560 6305 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 15:17:34.781580 6305 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 15:17:34.781584 6305 factory.go:656] Stopping watch factory\\\\nI0128 15:17:34.781593 6305 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 15:17:34.781600 6305 ovnkube.go:599] Stopped ovnkube\\\\nI0128 
15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.931931 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/42bda473-f030-45fd-99a9-bbe18a224ae3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.931986 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/42bda473-f030-45fd-99a9-bbe18a224ae3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.932002 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/42bda473-f030-45fd-99a9-bbe18a224ae3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.932028 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlmtc\" (UniqueName: \"kubernetes.io/projected/42bda473-f030-45fd-99a9-bbe18a224ae3-kube-api-access-qlmtc\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.937755 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.950079 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.954127 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.954164 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.954174 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.954189 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.954201 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:36Z","lastTransitionTime":"2026-01-28T15:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.964059 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.978357 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:36 crc kubenswrapper[4959]: I0128 15:17:36.990565 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:36Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.003334 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647
e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.015105 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.032531 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/42bda473-f030-45fd-99a9-bbe18a224ae3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.032580 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/42bda473-f030-45fd-99a9-bbe18a224ae3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.032613 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlmtc\" (UniqueName: \"kubernetes.io/projected/42bda473-f030-45fd-99a9-bbe18a224ae3-kube-api-access-qlmtc\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.032645 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/42bda473-f030-45fd-99a9-bbe18a224ae3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc 
kubenswrapper[4959]: I0128 15:17:37.033357 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/42bda473-f030-45fd-99a9-bbe18a224ae3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.034155 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/42bda473-f030-45fd-99a9-bbe18a224ae3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.038679 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/42bda473-f030-45fd-99a9-bbe18a224ae3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.049238 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlmtc\" (UniqueName: \"kubernetes.io/projected/42bda473-f030-45fd-99a9-bbe18a224ae3-kube-api-access-qlmtc\") pod \"ovnkube-control-plane-749d76644c-dsmtj\" (UID: \"42bda473-f030-45fd-99a9-bbe18a224ae3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.056633 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.056668 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.056680 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.056696 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.056708 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.121490 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" Jan 28 15:17:37 crc kubenswrapper[4959]: W0128 15:17:37.136509 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42bda473_f030_45fd_99a9_bbe18a224ae3.slice/crio-2c72105d408f04ffa718142ae5477bd92c47fc5f6de1c9bc2924dc1fdad1ad8b WatchSource:0}: Error finding container 2c72105d408f04ffa718142ae5477bd92c47fc5f6de1c9bc2924dc1fdad1ad8b: Status 404 returned error can't find the container with id 2c72105d408f04ffa718142ae5477bd92c47fc5f6de1c9bc2924dc1fdad1ad8b Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.159518 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.159563 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.159576 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.159636 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.159650 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.262731 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.262775 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.262786 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.262805 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.262816 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.365679 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.365754 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.365767 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.365786 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.365800 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.467775 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.467808 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.467816 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.467829 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.467839 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.474354 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 07:56:41.284364842 +0000 UTC Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.570233 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.570275 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.570286 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.570303 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.570313 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.586568 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:37 crc kubenswrapper[4959]: E0128 15:17:37.586699 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.586795 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:37 crc kubenswrapper[4959]: E0128 15:17:37.586955 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.672410 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.672446 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.672458 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.672475 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.672487 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.774003 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.774038 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.774047 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.774064 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.774101 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.820800 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" event={"ID":"42bda473-f030-45fd-99a9-bbe18a224ae3","Type":"ContainerStarted","Data":"1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.820848 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" event={"ID":"42bda473-f030-45fd-99a9-bbe18a224ae3","Type":"ContainerStarted","Data":"2c72105d408f04ffa718142ae5477bd92c47fc5f6de1c9bc2924dc1fdad1ad8b"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.823040 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/1.log" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.823552 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/0.log" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.826638 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268" exitCode=1 Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.826672 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.826719 4959 scope.go:117] "RemoveContainer" containerID="fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.827474 4959 scope.go:117] "RemoveContainer" containerID="162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268" Jan 28 15:17:37 crc kubenswrapper[4959]: E0128 15:17:37.827656 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.843398 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.856398 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.868519 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.876737 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.876990 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.877021 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.877045 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.877060 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.881032 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea1
77225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.899069 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c1
5e6be173ff1048f24cc82268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:34Z\\\",\\\"message\\\":\\\"ice/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 15:17:34.781386 6305 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 15:17:34.781424 6305 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 15:17:34.781443 6305 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 15:17:34.781481 6305 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 15:17:34.781480 6305 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 15:17:34.781510 6305 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 15:17:34.781519 6305 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 15:17:34.781530 6305 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 15:17:34.781552 6305 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 15:17:34.781563 6305 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 15:17:34.781560 6305 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 15:17:34.781580 6305 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 15:17:34.781584 6305 factory.go:656] Stopping watch factory\\\\nI0128 15:17:34.781593 6305 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 15:17:34.781600 6305 ovnkube.go:599] Stopped ovnkube\\\\nI0128 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:37Z\\\",\\\"message\\\":\\\"g{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-config-operator/metrics\\\\\\\"}\\\\nI0128 15:17:37.549691 6448 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-ingress/router-internal-default]} name:Service_openshift-ingress/router-internal-default_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.176:1936: 10.217.4.176:443: 10.217.4.176:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {595f6e90-7cd8-4871-85ab-9519d3c9c3e5}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:37.549825 6448 services_controller.go:360] Finished syncing service metrics on namespace openshift-config-operator for network=default : 5.737205ms\\\\nF0128 15:17:37.549829 6448 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network 
controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.912975 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.928574 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.943069 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.960651 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.978941 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.982117 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.982152 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.982160 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.982176 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.982186 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:37Z","lastTransitionTime":"2026-01-28T15:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:37 crc kubenswrapper[4959]: I0128 15:17:37.998420 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:37Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.012639 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.022189 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.033877 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-l
ib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.042577 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\
"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.083824 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.083867 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.083881 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.083899 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.083911 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.186169 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.186212 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.186220 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.186234 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.186245 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.276961 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-4d9tj"] Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.277457 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:38 crc kubenswrapper[4959]: E0128 15:17:38.277519 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.288764 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.288808 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.288817 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.288834 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.288846 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.291367 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.303591 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.314513 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.333055 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:34Z\\\",\\\"message\\\":\\\"ice/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 15:17:34.781386 6305 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 15:17:34.781424 6305 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 15:17:34.781443 6305 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 15:17:34.781481 6305 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 15:17:34.781480 6305 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 15:17:34.781510 6305 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 15:17:34.781519 6305 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 15:17:34.781530 6305 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 15:17:34.781552 6305 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 15:17:34.781563 6305 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 15:17:34.781560 6305 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 15:17:34.781580 6305 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 15:17:34.781584 6305 factory.go:656] Stopping watch factory\\\\nI0128 15:17:34.781593 6305 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 15:17:34.781600 6305 ovnkube.go:599] Stopped ovnkube\\\\nI0128 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:37Z\\\",\\\"message\\\":\\\"g{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-config-operator/metrics\\\\\\\"}\\\\nI0128 15:17:37.549691 6448 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-ingress/router-internal-default]} name:Service_openshift-ingress/router-internal-default_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.176:1936: 10.217.4.176:443: 10.217.4.176:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {595f6e90-7cd8-4871-85ab-9519d3c9c3e5}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:37.549825 6448 services_controller.go:360] Finished syncing service metrics on namespace openshift-config-operator for network=default : 5.737205ms\\\\nF0128 15:17:37.549829 6448 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://578
8f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.342284 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.345878 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjqbn\" (UniqueName: \"kubernetes.io/projected/943bb4d7-0907-4b19-b9e0-580af6061632-kube-api-access-tjqbn\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.345949 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.353160 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7
d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.364139 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.373251 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.384982 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.390706 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.390744 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.390756 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.390769 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.390778 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.396861 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.407809 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.417999 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.428465 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.438184 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.446835 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjqbn\" (UniqueName: \"kubernetes.io/projected/943bb4d7-0907-4b19-b9e0-580af6061632-kube-api-access-tjqbn\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.446896 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:38 crc kubenswrapper[4959]: E0128 15:17:38.446996 4959 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:38 crc kubenswrapper[4959]: E0128 15:17:38.447044 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs podName:943bb4d7-0907-4b19-b9e0-580af6061632 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:38.947028848 +0000 UTC m=+42.392935231 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs") pod "network-metrics-daemon-4d9tj" (UID: "943bb4d7-0907-4b19-b9e0-580af6061632") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.451758 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e
5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/v
ar/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.465566 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.468249 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjqbn\" (UniqueName: \"kubernetes.io/projected/943bb4d7-0907-4b19-b9e0-580af6061632-kube-api-access-tjqbn\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.474986 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 02:54:22.161498359 +0000 UTC Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.493580 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.493626 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.493638 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.493656 4959 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.493671 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.586224 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:38 crc kubenswrapper[4959]: E0128 15:17:38.586616 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.595794 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.595825 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.595836 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.595849 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.595860 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.698242 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.698286 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.698298 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.698313 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.698323 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.800828 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.800875 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.800885 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.800901 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.800912 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.834136 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/1.log" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.841233 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" event={"ID":"42bda473-f030-45fd-99a9-bbe18a224ae3","Type":"ContainerStarted","Data":"eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.865642 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.884728 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.905655 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.905721 4959 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.905737 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.905760 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.905774 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:38Z","lastTransitionTime":"2026-01-28T15:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.906158 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.926996 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.939866 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.951815 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:38 crc kubenswrapper[4959]: E0128 15:17:38.952411 4959 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:38 crc kubenswrapper[4959]: E0128 15:17:38.952605 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs podName:943bb4d7-0907-4b19-b9e0-580af6061632 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:39.952557383 +0000 UTC m=+43.398463806 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs") pod "network-metrics-daemon-4d9tj" (UID: "943bb4d7-0907-4b19-b9e0-580af6061632") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.959609 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.978451 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 
15:17:38 crc kubenswrapper[4959]: I0128 15:17:38.997124 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.008012 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.008051 4959 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.008060 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.008078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.008088 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.009995 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:39Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.022651 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:39Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.041353 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:34Z\\\",\\\"message\\\":\\\"ice/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 15:17:34.781386 6305 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 15:17:34.781424 6305 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 15:17:34.781443 6305 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 15:17:34.781481 6305 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 15:17:34.781480 6305 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 15:17:34.781510 6305 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 15:17:34.781519 6305 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 15:17:34.781530 6305 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 15:17:34.781552 6305 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 15:17:34.781563 6305 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 15:17:34.781560 6305 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 15:17:34.781580 6305 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 15:17:34.781584 6305 factory.go:656] Stopping watch factory\\\\nI0128 15:17:34.781593 6305 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 15:17:34.781600 6305 ovnkube.go:599] Stopped ovnkube\\\\nI0128 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:37Z\\\",\\\"message\\\":\\\"g{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-config-operator/metrics\\\\\\\"}\\\\nI0128 15:17:37.549691 6448 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-ingress/router-internal-default]} name:Service_openshift-ingress/router-internal-default_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.176:1936: 10.217.4.176:443: 10.217.4.176:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {595f6e90-7cd8-4871-85ab-9519d3c9c3e5}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:37.549825 6448 services_controller.go:360] Finished syncing service metrics on namespace openshift-config-operator for network=default : 5.737205ms\\\\nF0128 15:17:37.549829 6448 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://578
8f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:39Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.052586 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:39Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.065899 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:39Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.083560 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:39Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.097222 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:39Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.110250 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.110286 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.110295 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.110309 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.110320 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.113190 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:39Z is after 2025-08-24T17:21:41Z"
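
---- editorial note (not log output) ----
Every "Failed to update status for pod" entry above fails the same way: the API server forwards each status patch to the pod.network-node-identity.openshift.io validating webhook at https://127.0.0.1:9743, and the TLS handshake is rejected because the webhook's serving certificate expired on 2025-08-24 while the node clock reads 2026-01-28, the usual symptom of a CRC image resumed long after its certificates were minted. The Go sketch below mirrors (but is not) the validity-window test inside crypto/x509 that emits this exact "certificate has expired or is not yet valid" wording; it is illustrative only, and the certificate path is hypothetical.

    // certexpiry.go - minimal sketch (not kubelet/apiserver code) of the
    // validity-window check in Go's crypto/x509 that produces
    // "x509: certificate has expired or is not yet valid".
    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "log"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; point at any PEM-encoded serving certificate.
        data, err := os.ReadFile("/tmp/webhook-serving.crt")
        if err != nil {
            log.Fatal(err)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            log.Fatal("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            log.Fatal(err)
        }
        now := time.Now()
        switch {
        case now.After(cert.NotAfter):
            fmt.Printf("expired: current time %s is after %s\n",
                now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
        case now.Before(cert.NotBefore):
            fmt.Printf("not yet valid: current time %s is before %s\n",
                now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
        default:
            fmt.Println("within validity window until", cert.NotAfter.Format(time.RFC3339))
        }
    }

Run against the webhook's serving certificate, this would reproduce the "current time ... is after ..." tail seen on each failed patch above.
---- end editorial note ----
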
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.212906 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.212953 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.212963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.212978 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.212990 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.316211 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.316730 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.316751 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.316775 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.316794 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.419807 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.419855 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.419867 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.419884 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.419897 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.475724 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 15:40:46.134979954 +0000 UTC
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.522695 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.522757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.522780 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.522811 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.522835 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.586366 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.586429 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:17:39 crc kubenswrapper[4959]: E0128 15:17:39.586487 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:17:39 crc kubenswrapper[4959]: E0128 15:17:39.586593 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
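
---- editorial note (not log output) ----
The "No sandbox for pod" and "Error syncing pod, skipping" pairs are downstream of the NetworkReady=false condition repeated throughout this capture: kubelet will not create new pod sandboxes until the runtime reports a usable CNI network, and here the runtime finds no configuration file in /etc/kubernetes/cni/net.d/ (ovn-kubernetes has not written one because its own pods are failing above). Below is a rough sketch of the "is there any CNI config at all?" half of that readiness probe, assuming the conventional .conf/.conflist/.json extensions; the real logic lives in the container runtime's CNI plugin manager, not in code like this.

    // cniready.go - rough sketch of the "any CNI config present?" half of
    // the network-readiness probe; illustrative only.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
        entries, err := os.ReadDir(confDir)
        if err != nil {
            fmt.Println("NetworkReady=false:", err)
            return
        }
        for _, e := range entries {
            // Conventional CNI config extensions (assumption for illustration).
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                fmt.Println("found CNI config:", filepath.Join(confDir, e.Name()))
                return
            }
        }
        fmt.Printf("NetworkReady=false: no CNI configuration file in %s/\n", confDir)
    }

---- end editorial note ----
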
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.626153 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.626222 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.626242 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.626269 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.626288 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.729041 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.729149 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.729169 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.729196 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.729215 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.832145 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.832286 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.832299 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.832317 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.832329 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.935095 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.935170 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.935181 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.935200 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.935216 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:39Z","lastTransitionTime":"2026-01-28T15:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:39 crc kubenswrapper[4959]: I0128 15:17:39.962397 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:17:39 crc kubenswrapper[4959]: E0128 15:17:39.962713 4959 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 15:17:39 crc kubenswrapper[4959]: E0128 15:17:39.962857 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs podName:943bb4d7-0907-4b19-b9e0-580af6061632 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:41.962830445 +0000 UTC m=+45.408736858 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs") pod "network-metrics-daemon-4d9tj" (UID: "943bb4d7-0907-4b19-b9e0-580af6061632") : object "openshift-multus"/"metrics-daemon-secret" not registered
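
---- editorial note (not log output) ----
The metrics-certs mount fails because the openshift-multus/metrics-daemon-secret object is not yet in kubelet's informer cache ("not registered"), and the volume manager schedules a delayed retry rather than looping hotly: "No retries permitted until ... (durationBeforeRetry 2s)". The delay doubles on each consecutive failure up to a cap. The sketch below shows that backoff pattern; the initial delay, factor, and cap here are assumptions for illustration, not kubelet's exact constants.

    // mountbackoff.go - sketch of the exponential backoff behind
    // "No retries permitted until ... (durationBeforeRetry 2s)".
    // Initial delay, factor, and cap are illustrative assumptions.
    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 500 * time.Millisecond           // assumed starting delay
        maxDelay := 2*time.Minute + 2*time.Second // assumed cap
        now := time.Now()

        for attempt := 1; attempt <= 8; attempt++ {
            fmt.Printf("attempt %d failed; no retries permitted until %s (durationBeforeRetry %s)\n",
                attempt, now.Add(delay).Format(time.RFC3339), delay)
            now = now.Add(delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }

Under these assumptions the delay sequence runs 500ms, 1s, 2s, 4s, ..., which is consistent with the 2s value captured here being a third consecutive failure.
---- end editorial note ----
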
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.038314 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.038353 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.038362 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.038375 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.038384 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.140303 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.140353 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.140365 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.140380 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.140389 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.242745 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.242786 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.242798 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.242813 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.242827 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.344624 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.344670 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.344680 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.344694 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.344705 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.447224 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.447258 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.447269 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.447282 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.447290 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.476329 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 23:58:16.161967756 +0000 UTC Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.550299 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.550372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.550391 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.550420 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.550437 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.587243 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.587440 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:40 crc kubenswrapper[4959]: E0128 15:17:40.587515 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:40 crc kubenswrapper[4959]: E0128 15:17:40.587689 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.604152 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.620422 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.633084 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.648860 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.654082 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.654149 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:40 crc 
kubenswrapper[4959]: I0128 15:17:40.654162 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.654182 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.654192 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.667933 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:1
7:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.681683 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.695792 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.721417 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.747020 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.756262 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.756305 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.756314 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.756328 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.756339 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.772766 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.786378 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"ima
geID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.800496 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.813848 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.824815 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.844254 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:34Z\\\",\\\"message\\\":\\\"ice/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 15:17:34.781386 6305 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 15:17:34.781424 6305 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 15:17:34.781443 6305 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 15:17:34.781481 6305 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 15:17:34.781480 6305 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 15:17:34.781510 6305 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 15:17:34.781519 6305 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 15:17:34.781530 6305 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 15:17:34.781552 6305 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 15:17:34.781563 6305 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 15:17:34.781560 6305 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 15:17:34.781580 6305 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 15:17:34.781584 6305 factory.go:656] Stopping watch factory\\\\nI0128 15:17:34.781593 6305 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 15:17:34.781600 6305 ovnkube.go:599] Stopped ovnkube\\\\nI0128 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:37Z\\\",\\\"message\\\":\\\"g{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-config-operator/metrics\\\\\\\"}\\\\nI0128 15:17:37.549691 6448 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-ingress/router-internal-default]} name:Service_openshift-ingress/router-internal-default_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.176:1936: 10.217.4.176:443: 10.217.4.176:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {595f6e90-7cd8-4871-85ab-9519d3c9c3e5}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:37.549825 6448 services_controller.go:360] Finished syncing service metrics on namespace openshift-config-operator for network=default : 5.737205ms\\\\nF0128 15:17:37.549829 6448 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://578
8f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.853569 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.857877 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.857908 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.857918 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.857931 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.857940 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.960485 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.960519 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.960528 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.960543 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:40 crc kubenswrapper[4959]: I0128 15:17:40.960554 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:40Z","lastTransitionTime":"2026-01-28T15:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.063842 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.063895 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.063909 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.063935 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.063950 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.166474 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.166566 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.166580 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.166595 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.166608 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.268593 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.268667 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.268682 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.268698 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.268709 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.371898 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.371950 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.371960 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.371979 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.371989 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.474705 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.474760 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.474773 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.474794 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.474810 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.476892 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 15:05:19.156435478 +0000 UTC Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.577785 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.577837 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.577849 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.577867 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.577879 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.586252 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.586399 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:41 crc kubenswrapper[4959]: E0128 15:17:41.586526 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:41 crc kubenswrapper[4959]: E0128 15:17:41.586699 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.681101 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.681193 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.681208 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.681234 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.681251 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.784414 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.784494 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.784515 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.784544 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.784566 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.887393 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.887478 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.887515 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.887548 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.887582 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.980846 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:41 crc kubenswrapper[4959]: E0128 15:17:41.981201 4959 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:41 crc kubenswrapper[4959]: E0128 15:17:41.981387 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs podName:943bb4d7-0907-4b19-b9e0-580af6061632 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:45.981336117 +0000 UTC m=+49.427242630 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs") pod "network-metrics-daemon-4d9tj" (UID: "943bb4d7-0907-4b19-b9e0-580af6061632") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.991051 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.991102 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.991140 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.991164 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:41 crc kubenswrapper[4959]: I0128 15:17:41.991180 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:41Z","lastTransitionTime":"2026-01-28T15:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.094282 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.094577 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.094611 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.094651 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.094688 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.197859 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.197956 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.197982 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.198019 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.198048 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.301961 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.302051 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.302078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.302158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.302191 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.406402 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.406463 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.406483 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.406509 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.406531 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.477336 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 06:52:34.423860198 +0000 UTC Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.509212 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.509276 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.509291 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.509315 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.509333 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.586832 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:42 crc kubenswrapper[4959]: E0128 15:17:42.587048 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.587161 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:42 crc kubenswrapper[4959]: E0128 15:17:42.587435 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.612298 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.612387 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.612404 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.612426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.612806 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.716708 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.716777 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.716790 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.716812 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.716825 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.820153 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.820197 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.820209 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.820226 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.820331 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.923893 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.923951 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.923963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.923980 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:42 crc kubenswrapper[4959]: I0128 15:17:42.923995 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:42Z","lastTransitionTime":"2026-01-28T15:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.027078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.027177 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.027197 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.027225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.027246 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.130547 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.130666 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.130696 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.130739 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.130771 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.234461 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.234510 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.234519 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.234569 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.234581 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.337583 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.337652 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.337671 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.337699 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.337718 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.440953 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.441052 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.441073 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.441138 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.441215 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.478268 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 18:34:56.88968844 +0000 UTC Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.544483 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.544556 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.544571 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.544592 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.544604 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.586149 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.586258 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:43 crc kubenswrapper[4959]: E0128 15:17:43.586297 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:43 crc kubenswrapper[4959]: E0128 15:17:43.586447 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.647800 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.647903 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.647929 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.647961 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.647986 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.750483 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.750561 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.750580 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.750610 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.750632 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.853680 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.853754 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.853772 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.853796 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.853813 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.957134 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.957206 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.957219 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.957237 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:43 crc kubenswrapper[4959]: I0128 15:17:43.957273 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:43Z","lastTransitionTime":"2026-01-28T15:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.060226 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.060285 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.060299 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.060317 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.060332 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.163003 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.163079 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.163099 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.163157 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.163178 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.267235 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.267302 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.267317 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.267337 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.267357 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.370915 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.370967 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.370982 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.371014 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.371035 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.474456 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.474524 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.474544 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.474576 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.474598 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.479517 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 07:29:39.026970164 +0000 UTC Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.577641 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.577710 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.577729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.577757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.577777 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.586488 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.586498 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:44 crc kubenswrapper[4959]: E0128 15:17:44.586731 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:44 crc kubenswrapper[4959]: E0128 15:17:44.586843 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.680666 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.680704 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.680713 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.680729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.680738 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.784189 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.784231 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.784247 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.784264 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.784277 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.887304 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.887370 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.887382 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.887398 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.887409 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.992819 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.992880 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.992895 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.992916 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:44 crc kubenswrapper[4959]: I0128 15:17:44.992933 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:44Z","lastTransitionTime":"2026-01-28T15:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.096237 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.096351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.096380 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.096413 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.096439 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.198913 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.198953 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.198963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.198975 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.198985 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.301745 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.301783 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.301801 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.301845 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.301854 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.403873 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.403953 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.403968 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.403984 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.403995 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.480548 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 06:23:05.178170772 +0000 UTC Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.507377 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.507434 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.507448 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.507471 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.507485 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.586855 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.586964 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:45 crc kubenswrapper[4959]: E0128 15:17:45.587002 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:45 crc kubenswrapper[4959]: E0128 15:17:45.587102 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.609492 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.609534 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.609545 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.609559 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.609570 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.712616 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.712662 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.712677 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.712693 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.712702 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.815891 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.815942 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.815953 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.815969 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.815980 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.919531 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.919650 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.919674 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.919744 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:45 crc kubenswrapper[4959]: I0128 15:17:45.919765 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:45Z","lastTransitionTime":"2026-01-28T15:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.021929 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.021967 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.021976 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.021989 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.021998 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.025484 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.025591 4959 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.025641 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs podName:943bb4d7-0907-4b19-b9e0-580af6061632 nodeName:}" failed. No retries permitted until 2026-01-28 15:17:54.025628134 +0000 UTC m=+57.471534517 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs") pod "network-metrics-daemon-4d9tj" (UID: "943bb4d7-0907-4b19-b9e0-580af6061632") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.124196 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.124242 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.124253 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.124268 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.124278 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.227522 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.227576 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.227590 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.227612 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.227626 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.331670 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.331721 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.331731 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.331757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.331769 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.434652 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.434749 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.434769 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.434803 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.434823 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.481314 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 19:43:06.270854593 +0000 UTC Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.537477 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.537526 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.537538 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.537557 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.537569 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.586680 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.586723 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.586869 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.587063 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.640479 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.640519 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.640531 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.640546 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.640559 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.744980 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.745062 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.745079 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.745142 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.745165 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.847095 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.847159 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.847168 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.847183 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.847201 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.897034 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.897095 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.897127 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.897150 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.897167 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.911527 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:46Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.915467 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.915514 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.915537 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.915558 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.915570 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.929466 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:46Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.940572 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.941038 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.941173 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.941297 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.941398 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.957665 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:46Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.961998 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.962127 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.962150 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.962178 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.962196 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.976240 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:46Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.980536 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.980586 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.980597 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.980615 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.980627 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.993159 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:46Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:46 crc kubenswrapper[4959]: E0128 15:17:46.993327 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.995345 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
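[Editor's annotation] The five failed PATCH attempts above share one root cause: the API server forwards each node-status update to the validating webhook node.network-node-identity.openshift.io at https://127.0.0.1:9743, whose serving certificate expired on 2025-08-24, so every call fails TLS verification. The kubelet retries a fixed number of times (nodeStatusUpdateRetry, five in upstream kubelet) and then logs "update node status exceeds retry count" until the next sync period. Below is a minimal Go sketch, not kubelet or webhook code, of the x509 validity check that produces this error class; the certificate path is hypothetical.

// Minimal sketch (assumed setup, not kubelet code): Go's crypto/x509 rejects
// a chain when time.Now() falls outside a certificate's [NotBefore, NotAfter]
// window, yielding "certificate has expired or is not yet valid".
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; point it at the webhook's serving certificate.
	pemBytes, err := os.ReadFile("webhook-serving.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now().UTC()
	fmt.Printf("NotBefore=%s NotAfter=%s now=%s\n", cert.NotBefore, cert.NotAfter, now)
	switch {
	case now.After(cert.NotAfter):
		// The condition behind the log lines above: current time
		// 2026-01-28T15:17:46Z is after NotAfter 2025-08-24T17:21:41Z.
		fmt.Println("certificate has expired")
	case now.Before(cert.NotBefore):
		fmt.Println("certificate is not yet valid")
	default:
		fmt.Println("certificate is within its validity window")
	}
}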
event="NodeHasSufficientMemory" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.995398 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.995409 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.995427 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:46 crc kubenswrapper[4959]: I0128 15:17:46.995440 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:46Z","lastTransitionTime":"2026-01-28T15:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.098386 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.098515 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.098542 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.098643 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.098680 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.201399 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.201433 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.201441 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.201454 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.201464 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.304361 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.304425 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.304445 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.304475 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.304496 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.407820 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.407890 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.407905 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.407921 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.407932 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.482338 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 19:05:28.539924808 +0000 UTC Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.510225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.510290 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.510301 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.510316 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.510327 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.586975 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.587169 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:47 crc kubenswrapper[4959]: E0128 15:17:47.587286 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:47 crc kubenswrapper[4959]: E0128 15:17:47.587440 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.613041 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.613084 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.613093 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.613141 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.613162 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.715385 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.715423 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.715433 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.715496 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.715511 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.818729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.818781 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.818791 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.818807 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.818820 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.923970 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.924080 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.924143 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.924172 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:47 crc kubenswrapper[4959]: I0128 15:17:47.924188 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:47Z","lastTransitionTime":"2026-01-28T15:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.027178 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.027244 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.027262 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.027290 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.027303 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.130732 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.131007 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.131175 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.131284 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.131418 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.234664 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.234722 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.234744 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.234771 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.234790 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.338134 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.338464 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.338546 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.338655 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.338732 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.440972 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.441037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.441047 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.441061 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.441071 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.482602 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 10:14:56.664938057 +0000 UTC Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.544054 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.544096 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.544124 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.544143 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.544156 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.587076 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.587172 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:48 crc kubenswrapper[4959]: E0128 15:17:48.587289 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:48 crc kubenswrapper[4959]: E0128 15:17:48.587477 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.647098 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.647192 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.647208 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.647231 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.647245 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.750692 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.750765 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.750789 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.750821 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.750846 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.854022 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.854076 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.854088 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.854123 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.854149 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.958047 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.958146 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.958161 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.958182 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:48 crc kubenswrapper[4959]: I0128 15:17:48.958198 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:48Z","lastTransitionTime":"2026-01-28T15:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.060440 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.060474 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.060485 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.060497 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.060508 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.163072 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.163169 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.163182 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.163197 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.163207 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.265926 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.266018 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.266036 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.266064 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.266084 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.368582 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.368629 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.368638 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.368657 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.368669 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.472429 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.472499 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.472518 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.472547 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.472565 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.483622 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 04:34:51.425577388 +0000 UTC Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.575630 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.575682 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.575692 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.575711 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.575722 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.586102 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.586176 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:49 crc kubenswrapper[4959]: E0128 15:17:49.586248 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:49 crc kubenswrapper[4959]: E0128 15:17:49.586366 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.679797 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.680165 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.680262 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.680354 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.680445 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.784602 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.784686 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.784710 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.784740 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.784761 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.887640 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.887704 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.887714 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.887739 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.887754 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.990708 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.990769 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.990783 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.990831 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:49 crc kubenswrapper[4959]: I0128 15:17:49.990843 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:49Z","lastTransitionTime":"2026-01-28T15:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.093271 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.093319 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.093335 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.093354 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.093367 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.196880 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.196917 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.196925 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.196939 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.196948 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.300564 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.300604 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.300615 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.300630 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.300641 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.403347 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.403622 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.403692 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.403796 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.403877 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.484119 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 20:24:35.436387096 +0000 UTC Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.506160 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.506218 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.506230 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.506258 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.506275 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.586010 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:50 crc kubenswrapper[4959]: E0128 15:17:50.586171 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.586616 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:50 crc kubenswrapper[4959]: E0128 15:17:50.587184 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.588093 4959 scope.go:117] "RemoveContainer" containerID="162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.606282 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCou
nt\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.609847 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.609963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.609995 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.610037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.610068 4959 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.634445 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.653522 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.671032 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.699220 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.715410 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.715584 4959 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.716156 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.716448 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.716562 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.721083 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.740266 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.753180 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.773298 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\
\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\
\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.789736 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\
"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.809723 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/open
shift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.819681 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.819734 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.819754 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.819781 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.819799 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.826189 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.840329 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.855981 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.879289 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fba642f6c71aa1ddbec2a23de7b38bb189a7b0eb3eea46ca2505900374c64c03\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:34Z\\\",\\\"message\\\":\\\"ice/v1/apis/informers/externalversions/factory.go:140\\\\nI0128 15:17:34.781386 6305 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0128 15:17:34.781424 6305 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0128 15:17:34.781443 6305 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0128 15:17:34.781481 6305 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 15:17:34.781480 6305 handler.go:208] Removed *v1.Node event handler 2\\\\nI0128 15:17:34.781510 6305 handler.go:208] Removed *v1.Node event handler 7\\\\nI0128 15:17:34.781519 6305 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 15:17:34.781530 6305 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 15:17:34.781552 6305 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 15:17:34.781563 6305 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 15:17:34.781560 6305 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 15:17:34.781580 6305 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 15:17:34.781584 6305 factory.go:656] Stopping watch factory\\\\nI0128 15:17:34.781593 6305 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 15:17:34.781600 6305 ovnkube.go:599] Stopped ovnkube\\\\nI0128 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:37Z\\\",\\\"message\\\":\\\"g{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-config-operator/metrics\\\\\\\"}\\\\nI0128 15:17:37.549691 6448 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-ingress/router-internal-default]} name:Service_openshift-ingress/router-internal-default_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.176:1936: 10.217.4.176:443: 10.217.4.176:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {595f6e90-7cd8-4871-85ab-9519d3c9c3e5}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:37.549825 6448 services_controller.go:360] Finished syncing service metrics on namespace openshift-config-operator for network=default : 5.737205ms\\\\nF0128 15:17:37.549829 6448 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://578
8f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.881772 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/1.log" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.885177 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.885326 4959 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 
28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.891619 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.904249 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.918804 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.923237 4959 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.923277 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.923286 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.923304 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.923316 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:50Z","lastTransitionTime":"2026-01-28T15:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.939050 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894
514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:37Z\\\",\\\"message\\\":\\\"g{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-config-operator/metrics\\\\\\\"}\\\\nI0128 15:17:37.549691 6448 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-ingress/router-internal-default]} name:Service_openshift-ingress/router-internal-default_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.176:1936: 10.217.4.176:443: 10.217.4.176:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {595f6e90-7cd8-4871-85ab-9519d3c9c3e5}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:37.549825 6448 services_controller.go:360] Finished syncing service metrics on namespace openshift-config-operator for network=default : 5.737205ms\\\\nF0128 15:17:37.549829 6448 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped 
already,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\
\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.952652 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.968185 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.983699 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:50 crc kubenswrapper[4959]: I0128 15:17:50.998016 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:50Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.010833 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.026383 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.026439 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.026449 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.026468 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.026484 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.037299 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.054609 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.068789 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.082397 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-l
ib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.094122 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\
"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.117744 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/c
ni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeov
erride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.128951 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.129260 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.129374 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.129463 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.129548 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.133438 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 
15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.156056 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.231711 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.231769 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.231778 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.231793 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.231803 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.334239 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.334281 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.334292 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.334306 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.334316 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.389758 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.437287 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.437335 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.437348 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.437367 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.437378 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.484487 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 09:10:40.920037757 +0000 UTC
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.540084 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.540133 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.540141 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.540158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.540169 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.586302 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.586331 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:17:51 crc kubenswrapper[4959]: E0128 15:17:51.586456 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:17:51 crc kubenswrapper[4959]: E0128 15:17:51.586687 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.642580 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.642835 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.642926 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.643020 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.643126 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.746503 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.746596 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.746612 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.746636 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.746651 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.849626 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.849665 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.849674 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.849689 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.849699 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.890526 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/2.log"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.891211 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/1.log"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.894387 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691" exitCode=1
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.894430 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691"}
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.894477 4959 scope.go:117] "RemoveContainer" containerID="162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.895459 4959 scope.go:117] "RemoveContainer" containerID="d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691"
Jan 28 15:17:51 crc kubenswrapper[4959]: E0128 15:17:51.895709 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.922656 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.922568 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:37Z\\\",\\\"message\\\":\\\"g{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-config-operator/metrics\\\\\\\"}\\\\nI0128 15:17:37.549691 6448 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-ingress/router-internal-default]} name:Service_openshift-ingress/router-internal-default_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.176:1936: 10.217.4.176:443: 10.217.4.176:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {595f6e90-7cd8-4871-85ab-9519d3c9c3e5}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:37.549825 6448 services_controller.go:360] Finished syncing service metrics on namespace openshift-config-operator for network=default : 5.737205ms\\\\nF0128 15:17:37.549829 6448 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster 
options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b5
84fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.933812 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.937157 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.953341 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.953402 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.953418 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.953441 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.953456 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:51Z","lastTransitionTime":"2026-01-28T15:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.954508 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.971197 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:51 crc kubenswrapper[4959]: I0128 15:17:51.987003 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.002408 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:51Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.019650 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647
e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.038260 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.051126 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.057006 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.057055 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.057070 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.057095 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.057149 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.065605 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.079528 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.098432 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.114274 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 
15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.128975 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.146338 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.160133 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.160223 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.160281 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.160301 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.160329 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.161809 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.175382 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.190388 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.203833 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.217059 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 
15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.245185 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a6
05d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://162c9a4dfcfe55738b1ba23ce5a2024c5f1290c15e6be173ff1048f24cc82268\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:37Z\\\",\\\"message\\\":\\\"g{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-config-operator/metrics\\\\\\\"}\\\\nI0128 15:17:37.549691 6448 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-ingress/router-internal-default]} name:Service_openshift-ingress/router-internal-default_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.176:1936: 10.217.4.176:443: 10.217.4.176:80:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {595f6e90-7cd8-4871-85ab-9519d3c9c3e5}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:37.549825 6448 services_controller.go:360] Finished syncing service metrics on namespace openshift-config-operator for network=default : 5.737205ms\\\\nF0128 15:17:37.549829 6448 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped 
already,\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.256824 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.
11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.262695 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.262735 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.262743 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.262757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.262769 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.272133 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.284401 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/e
tc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.300581 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.313657 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.314011 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:18:24.313988488 +0000 UTC m=+87.759894871 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.313920 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.314339 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.314623 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.314721 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.314776 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.314807 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.314825 4959 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.314868 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 15:18:24.314860299 +0000 UTC m=+87.760766682 (durationBeforeRetry 32s). 
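
The 32s in durationBeforeRetry comes from the volume manager's capped exponential backoff: each consecutive failure of the same operation doubles the wait. A sketch of that schedule follows; the 500ms initial delay and 2m2s cap are assumed upstream defaults, not values stated in this log.

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoff returns the delay before the next retry after `failures`
    // consecutive failures, doubling from `initial` and clamping at `cap`.
    // The 500ms / x2 / 2m2s parameters in main are assumptions.
    func backoff(initial, cap time.Duration, failures int) time.Duration {
    	d := initial
    	for i := 0; i < failures; i++ {
    		d *= 2
    		if d >= cap {
    			return cap
    		}
    	}
    	return d
    }

    func main() {
    	for n := 0; n <= 7; n++ {
    		fmt.Printf("consecutive failures: %d -> durationBeforeRetry %v\n",
    			n, backoff(500*time.Millisecond, 122*time.Second, n))
    	}
    }

With those assumed parameters, six consecutive failures yield exactly the 32s seen in the nestedpendingoperations entries above.
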
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.314948 4959 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.315067 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 15:18:24.315047614 +0000 UTC m=+87.760953997 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.314618 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.315427 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.315544 4959 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.315640 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:18:24.315626809 +0000 UTC m=+87.761533192 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.315730 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.315845 4959 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.315882 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:18:24.315875285 +0000 UTC m=+87.761781668 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.331882 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.347173 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 
15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.363443 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.365521 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.365562 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.365572 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.365585 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.365595 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
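
Each of these NodeNotReady heartbeats repeats one root cause: no CNI configuration file under /etc/kubernetes/cni/net.d/. A minimal sketch of that readiness check is below, reading the directory named in the message; the accepted file extensions are an assumption, simplified from what the container runtime's CNI manager actually accepts.

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	// Directory taken from the log message itself.
    	dir := "/etc/kubernetes/cni/net.d"
    	entries, err := os.ReadDir(dir)
    	if err != nil {
    		fmt.Printf("cannot read %s: %v\n", dir, err)
    		return
    	}
    	var confs []string
    	for _, e := range entries {
    		// Assumed extension filter; real runtimes are more nuanced.
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			confs = append(confs, e.Name())
    		}
    	}
    	if len(confs) == 0 {
    		fmt.Println("no CNI configuration file found: node stays NotReady")
    		return
    	}
    	fmt.Println("CNI configurations:", confs)
    }
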
Has your network provider started?"} Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.380000 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.393712 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.409313 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\
\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.424166 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.468891 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.468948 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.468965 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.469352 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.469378 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.484879 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 15:51:02.420349744 +0000 UTC Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.572950 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.573016 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.573029 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.573051 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.573064 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.586561 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.586690 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.586725 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.586928 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.677314 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.677358 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.677367 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.677386 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.677398 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.781259 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.781332 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.781351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.781378 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.781398 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.885176 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.885237 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.885256 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.885292 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.885313 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.900376 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/2.log"
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.904614 4959 scope.go:117] "RemoveContainer" containerID="d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691"
Jan 28 15:17:52 crc kubenswrapper[4959]: E0128 15:17:52.905004 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8"
Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.925303 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.950134 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.972605 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.989032 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.989162 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.989200 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.989234 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.989254 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:52Z","lastTransitionTime":"2026-01-28T15:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:52 crc kubenswrapper[4959]: I0128 15:17:52.993720 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea1
77225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:52Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.024763 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894
514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.041611 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.060796 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.075802 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.091695 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.091973 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.092046 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.092185 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.092261 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.093563 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.107015 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.123330 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.138916 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.152656 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.189024 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.194932 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.194973 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.194984 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.195006 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.195021 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.211252 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.233349 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-bin
ary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\
\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for 
pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.246955 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\
\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:53Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.302234 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.302310 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.302326 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.302351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.302367 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.405661 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.405746 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.405756 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.405795 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.405809 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.485431 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 02:47:19.608309613 +0000 UTC Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.508736 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.508781 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.508792 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.508813 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.508825 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.586770 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.586896 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:53 crc kubenswrapper[4959]: E0128 15:17:53.586944 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:53 crc kubenswrapper[4959]: E0128 15:17:53.587161 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.611488 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.611569 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.611579 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.611604 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.611615 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.713780 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.713825 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.713836 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.713854 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.713868 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.817527 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.817604 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.817625 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.817658 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.817679 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.920943 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.921004 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.921015 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.921031 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:53 crc kubenswrapper[4959]: I0128 15:17:53.921053 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:53Z","lastTransitionTime":"2026-01-28T15:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.024723 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.024803 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.024844 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.024896 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.024924 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.037588 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:54 crc kubenswrapper[4959]: E0128 15:17:54.037788 4959 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:54 crc kubenswrapper[4959]: E0128 15:17:54.037882 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs podName:943bb4d7-0907-4b19-b9e0-580af6061632 nodeName:}" failed. No retries permitted until 2026-01-28 15:18:10.037856315 +0000 UTC m=+73.483762708 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs") pod "network-metrics-daemon-4d9tj" (UID: "943bb4d7-0907-4b19-b9e0-580af6061632") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.128723 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.128840 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.128869 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.128908 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.128933 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.232687 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.232855 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.232923 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.232954 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.233009 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.336628 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.336691 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.336709 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.336735 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.336752 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.439999 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.440053 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.440063 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.440081 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.440096 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.486454 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 02:02:42.496725507 +0000 UTC Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.542911 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.543000 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.543015 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.543034 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.543047 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.586229 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.586299 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:54 crc kubenswrapper[4959]: E0128 15:17:54.586409 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:54 crc kubenswrapper[4959]: E0128 15:17:54.586478 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.645594 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.645689 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.645702 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.645721 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.645736 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.747859 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.747902 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.747913 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.747931 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.747957 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.850908 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.850955 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.850964 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.850981 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.850990 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.954428 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.954486 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.954497 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.954514 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:54 crc kubenswrapper[4959]: I0128 15:17:54.954529 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:54Z","lastTransitionTime":"2026-01-28T15:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.057026 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.057138 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.057158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.057186 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.057206 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.160040 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.160078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.160087 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.160123 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.160132 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.263370 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.263468 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.263497 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.263538 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.263568 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.366886 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.366981 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.367008 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.367047 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.367074 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.470289 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.470383 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.470411 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.470434 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.470450 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.487469 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 15:25:30.565789541 +0000 UTC Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.573773 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.573831 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.573843 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.573862 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.573872 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.586264 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:55 crc kubenswrapper[4959]: E0128 15:17:55.586372 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.586351 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:55 crc kubenswrapper[4959]: E0128 15:17:55.586660 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.675812 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.675854 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.675865 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.675879 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.675888 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.778564 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.778628 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.778640 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.778658 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.778669 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.881643 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.881673 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.881702 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.881730 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.881742 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.983809 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.983849 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.983860 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.983877 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:55 crc kubenswrapper[4959]: I0128 15:17:55.983891 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:55Z","lastTransitionTime":"2026-01-28T15:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.086231 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.086668 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.086681 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.086699 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.086711 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.189922 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.189961 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.189972 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.189988 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.190000 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.293034 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.293075 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.293083 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.293099 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.293123 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.395946 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.395983 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.395992 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.396007 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.396017 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.488675 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 08:51:02.240131647 +0000 UTC Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.499101 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.499200 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.499221 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.499256 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.499278 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.586465 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.586559 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:56 crc kubenswrapper[4959]: E0128 15:17:56.586725 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:56 crc kubenswrapper[4959]: E0128 15:17:56.586861 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.601688 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.601727 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.601738 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.601757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.601770 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.705202 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.705248 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.705260 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.705276 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.705289 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.808524 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.808583 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.808597 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.808619 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.808637 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.913306 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.913386 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.913397 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.913433 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:56 crc kubenswrapper[4959]: I0128 15:17:56.913446 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:56Z","lastTransitionTime":"2026-01-28T15:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.017716 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.017786 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.017804 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.017831 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.017850 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.121335 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.121398 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.121411 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.121435 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.121447 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.149609 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.149720 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.149762 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.149808 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.149840 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: E0128 15:17:57.167612 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:57Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.173718 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.173931 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.174010 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.174079 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.174169 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: E0128 15:17:57.187552 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:57Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.191703 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.191783 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.191804 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.191831 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.191847 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: E0128 15:17:57.205599 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:57Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.209643 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.209708 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.209742 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.209772 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.209801 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: E0128 15:17:57.223411 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:57Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.228709 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.228757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.228771 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.229070 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.229119 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: E0128 15:17:57.245405 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:17:57Z is after 2025-08-24T17:21:41Z" Jan 28 15:17:57 crc kubenswrapper[4959]: E0128 15:17:57.245531 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.247316 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.247349 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.247359 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.247373 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.247387 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.351132 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.351188 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.351203 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.351226 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.351247 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.454940 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.455025 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.455050 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.455081 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.455103 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.489902 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 13:40:44.984106898 +0000 UTC Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.558617 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.558701 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.558718 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.558743 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.558758 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.586817 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.586932 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:57 crc kubenswrapper[4959]: E0128 15:17:57.586971 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:57 crc kubenswrapper[4959]: E0128 15:17:57.587187 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.662325 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.662400 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.662422 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.662452 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.662477 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.766508 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.766582 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.766601 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.766633 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.766655 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.870147 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.870219 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.870239 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.870267 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.870286 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.972405 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.972441 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.972450 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.972464 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:57 crc kubenswrapper[4959]: I0128 15:17:57.972473 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:57Z","lastTransitionTime":"2026-01-28T15:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.074356 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.074387 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.074396 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.074410 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.074418 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.177742 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.177809 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.177826 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.177849 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.177863 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.281287 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.281336 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.281346 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.281363 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.281373 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.385078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.385246 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.385273 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.385315 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.385343 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.488776 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.488860 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.488879 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.488900 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.488917 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.490206 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 22:14:27.426352943 +0000 UTC Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.587195 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.587202 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:17:58 crc kubenswrapper[4959]: E0128 15:17:58.587368 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:17:58 crc kubenswrapper[4959]: E0128 15:17:58.587534 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.592234 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.592286 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.592301 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.592324 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.592339 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.694970 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.695009 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.695061 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.695077 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.695091 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.797531 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.797568 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.797579 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.797593 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.797603 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.900027 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.900077 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.900086 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.900099 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:58 crc kubenswrapper[4959]: I0128 15:17:58.900113 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:58Z","lastTransitionTime":"2026-01-28T15:17:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.001941 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.001975 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.001983 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.001996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.002005 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.104479 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.104525 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.104539 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.104556 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.104568 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.206377 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.206413 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.206422 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.206440 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.206452 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.308809 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.308945 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.309003 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.309037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.309061 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.410969 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.411010 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.411020 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.411038 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.411051 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.490532 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 14:51:34.38109765 +0000 UTC Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.512964 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.512997 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.513004 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.513017 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.513026 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.586651 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.586709 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:17:59 crc kubenswrapper[4959]: E0128 15:17:59.586816 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:17:59 crc kubenswrapper[4959]: E0128 15:17:59.586895 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.616374 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.616414 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.616421 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.616437 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.616447 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.718550 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.718587 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.718595 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.718611 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.718620 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.820895 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.820985 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.820995 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.821009 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.821019 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.923964 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.924220 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.924237 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.924257 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:17:59 crc kubenswrapper[4959]: I0128 15:17:59.924268 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:17:59Z","lastTransitionTime":"2026-01-28T15:17:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.026950 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.026996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.027008 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.027028 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.027040 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.129925 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.129999 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.130017 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.130039 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.130054 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.232838 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.232922 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.232939 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.232963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.232980 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.336054 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.336149 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.336165 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.336194 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.336222 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.438795 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.438848 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.438858 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.438879 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.438893 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.491734 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 16:51:02.064936097 +0000 UTC Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.542101 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.542213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.542230 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.542255 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.542272 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.586209 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.586312 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:00 crc kubenswrapper[4959]: E0128 15:18:00.586505 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:00 crc kubenswrapper[4959]: E0128 15:18:00.586634 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.600874 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.616519 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.627850 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.642380 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\
\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\
\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.646075 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.646143 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.646156 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.646173 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.646184 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.656807 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-
api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.676334 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.688043 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.698909 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.716270 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.726590 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.740066 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.750820 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.750857 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.750868 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.750884 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.750895 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.754343 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.767564 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.780612 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.795926 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.807255 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/e
tc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.820383 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:00Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.853441 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.853521 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.853535 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.853550 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.853779 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.956593 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.956663 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.956677 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.956699 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:00 crc kubenswrapper[4959]: I0128 15:18:00.956711 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:00Z","lastTransitionTime":"2026-01-28T15:18:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.059045 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.059081 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.059091 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.059112 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.059124 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.160920 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.160972 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.160981 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.160995 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.161024 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.263570 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.263597 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.263604 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.263617 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.263625 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.365738 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.365770 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.365778 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.365792 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.365801 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.467854 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.467900 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.467908 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.467922 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.467932 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.491951 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 19:09:51.780291422 +0000 UTC Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.570210 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.570250 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.570259 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.570275 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.570285 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.586722 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.586748 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:01 crc kubenswrapper[4959]: E0128 15:18:01.586914 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:01 crc kubenswrapper[4959]: E0128 15:18:01.586945 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.672866 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.673415 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.673490 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.673526 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.673556 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.776836 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.777014 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.777036 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.777063 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.777081 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.879361 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.879430 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.879444 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.879467 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.879483 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.984042 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.984133 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.984150 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.984166 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:01 crc kubenswrapper[4959]: I0128 15:18:01.984175 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:01Z","lastTransitionTime":"2026-01-28T15:18:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.087057 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.087141 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.087178 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.087239 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.087256 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.189714 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.189784 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.189804 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.189836 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.189859 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.292858 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.292901 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.292912 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.292930 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.292940 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.395535 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.395570 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.395597 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.395624 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.395636 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.493097 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 05:56:47.377074963 +0000 UTC Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.497963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.497999 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.498010 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.498025 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.498036 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.586881 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.586916 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:02 crc kubenswrapper[4959]: E0128 15:18:02.587028 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:02 crc kubenswrapper[4959]: E0128 15:18:02.587190 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.600491 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.600547 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.600557 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.600575 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.600587 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.702474 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.702518 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.702530 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.702546 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.702556 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.804989 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.805021 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.805029 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.805044 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.805054 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.908329 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.908367 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.908376 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.908390 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:02 crc kubenswrapper[4959]: I0128 15:18:02.908399 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:02Z","lastTransitionTime":"2026-01-28T15:18:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.010144 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.010196 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.010205 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.010220 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.010237 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:03Z","lastTransitionTime":"2026-01-28T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.111914 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.111961 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.111970 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.111985 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.111996 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:03Z","lastTransitionTime":"2026-01-28T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.214410 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.214450 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.214459 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.214473 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.214483 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:03Z","lastTransitionTime":"2026-01-28T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.316810 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.316859 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.316870 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.316891 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.316904 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:03Z","lastTransitionTime":"2026-01-28T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.493257 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 16:25:24.861123252 +0000 UTC
Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.586840 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.586935 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:03 crc kubenswrapper[4959]: E0128 15:18:03.586982 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:03 crc kubenswrapper[4959]: E0128 15:18:03.587073 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.624184 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.624225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.624237 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.624253 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.624265 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:03Z","lastTransitionTime":"2026-01-28T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.726330 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.726360 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.726368 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.726381 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:03 crc kubenswrapper[4959]: I0128 15:18:03.726389 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:03Z","lastTransitionTime":"2026-01-28T15:18:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:04 crc kubenswrapper[4959]: I0128 15:18:04.494018 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 02:09:26.998412626 +0000 UTC
Jan 28 15:18:04 crc kubenswrapper[4959]: I0128 15:18:04.586433 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:04 crc kubenswrapper[4959]: E0128 15:18:04.586562 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:04 crc kubenswrapper[4959]: I0128 15:18:04.586620 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:04 crc kubenswrapper[4959]: E0128 15:18:04.586788 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.494442 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 13:03:13.000492464 +0000 UTC
Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.586634 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.586816 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:05 crc kubenswrapper[4959]: E0128 15:18:05.586968 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:05 crc kubenswrapper[4959]: E0128 15:18:05.587049 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.675592 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.675643 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.675655 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.675670 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.675681 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:05Z","lastTransitionTime":"2026-01-28T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.778231 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.778274 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.778285 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.778302 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:05 crc kubenswrapper[4959]: I0128 15:18:05.778319 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:05Z","lastTransitionTime":"2026-01-28T15:18:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.494809 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 02:19:58.866140015 +0000 UTC
Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.586956 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:06 crc kubenswrapper[4959]: E0128 15:18:06.587058 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.587175 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:06 crc kubenswrapper[4959]: E0128 15:18:06.587681 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.587880 4959 scope.go:117] "RemoveContainer" containerID="d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691" Jan 28 15:18:06 crc kubenswrapper[4959]: E0128 15:18:06.588143 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.597686 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.597761 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.597776 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.597790 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.597802 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:06Z","lastTransitionTime":"2026-01-28T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.599006 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.700288 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.700316 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.700323 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.700335 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:06 crc kubenswrapper[4959]: I0128 15:18:06.700345 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:06Z","lastTransitionTime":"2026-01-28T15:18:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.494880 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 07:29:54.619638271 +0000 UTC
Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.586334 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.586384 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:07 crc kubenswrapper[4959]: E0128 15:18:07.586471 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:18:07 crc kubenswrapper[4959]: E0128 15:18:07.586551 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:07 crc kubenswrapper[4959]: E0128 15:18:07.596018 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:07Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.600043 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.600075 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.600085 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.600101 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.600139 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:07Z","lastTransitionTime":"2026-01-28T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:07 crc kubenswrapper[4959]: E0128 15:18:07.612776 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:07Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.617324 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.617380 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.617393 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.617411 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.617426 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:07Z","lastTransitionTime":"2026-01-28T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:07 crc kubenswrapper[4959]: E0128 15:18:07.630591 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:07Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.633790 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.633826 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.633841 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.633859 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.633870 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:07Z","lastTransitionTime":"2026-01-28T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:07 crc kubenswrapper[4959]: E0128 15:18:07.645770 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:07Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.651213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.651243 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.651254 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.651315 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.651328 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:07Z","lastTransitionTime":"2026-01-28T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:07 crc kubenswrapper[4959]: E0128 15:18:07.665219 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:07Z is after 2025-08-24T17:21:41Z"
Jan 28 15:18:07 crc kubenswrapper[4959]: E0128 15:18:07.665351 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.666600 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.666628 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.666638 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.666655 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:07 crc kubenswrapper[4959]: I0128 15:18:07.666667 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:07Z","lastTransitionTime":"2026-01-28T15:18:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:08 crc kubenswrapper[4959]: I0128 15:18:08.495628 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 02:06:56.374102631 +0000 UTC
Jan 28 15:18:08 crc kubenswrapper[4959]: I0128 15:18:08.586591 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:08 crc kubenswrapper[4959]: I0128 15:18:08.586634 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:08 crc kubenswrapper[4959]: E0128 15:18:08.586746 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:08 crc kubenswrapper[4959]: E0128 15:18:08.586876 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
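The pod_workers.go:1301 errors above show the kubelet skipping sync for pods that need the cluster network while the runtime still reports NetworkReady=false; host-network pods are not gated this way. A rough Go sketch of that decision, with invented names (shouldSkipPodSync), since this is not the kubelet's actual code:

package main

import "fmt"

// shouldSkipPodSync is a hypothetical stand-in for the gate reflected in the
// "Error syncing pod, skipping" entries: pods needing the cluster network wait
// until the runtime reports NetworkReady=true; host-network pods do not.
func shouldSkipPodSync(networkReady, hostNetwork bool) (bool, string) {
	if !networkReady && !hostNetwork {
		return true, "network is not ready: container runtime network not ready: NetworkReady=false"
	}
	return false, ""
}

func main() {
	skip, reason := shouldSkipPodSync(false, false)
	fmt.Println(skip, reason) // true, the reason string logged above
	skip, _ = shouldSkipPodSync(false, true)
	fmt.Println(skip) // false: host-network pods can still start
}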
Jan 28 15:18:09 crc kubenswrapper[4959]: I0128 15:18:09.495803 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 02:58:41.450939338 +0000 UTC
Jan 28 15:18:09 crc kubenswrapper[4959]: I0128 15:18:09.586270 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:09 crc kubenswrapper[4959]: E0128 15:18:09.586420 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:18:09 crc kubenswrapper[4959]: I0128 15:18:09.586520 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:09 crc kubenswrapper[4959]: E0128 15:18:09.586667 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.102861 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:10 crc kubenswrapper[4959]: E0128 15:18:10.103168 4959 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 15:18:10 crc kubenswrapper[4959]: E0128 15:18:10.103267 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs podName:943bb4d7-0907-4b19-b9e0-580af6061632 nodeName:}" failed. No retries permitted until 2026-01-28 15:18:42.103241128 +0000 UTC m=+105.549147521 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs") pod "network-metrics-daemon-4d9tj" (UID: "943bb4d7-0907-4b19-b9e0-580af6061632") : object "openshift-multus"/"metrics-daemon-secret" not registered
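The nestedpendingoperations.go:348 entry pushes the next mount attempt out by durationBeforeRetry 32s, which sits on a doubling sequence (500ms, 1s, 2s, ..., capped). The initial delay and the 2m2s ceiling below are assumed kubelet-style defaults, not values taken from this log:

package main

import (
	"fmt"
	"time"
)

// nextBackoff sketches the doubling retry delay behind "durationBeforeRetry 32s";
// the 500ms floor and 2m2s ceiling are assumptions.
func nextBackoff(current time.Duration) time.Duration {
	const (
		initialDelay = 500 * time.Millisecond
		maxDelay     = 2*time.Minute + 2*time.Second
	)
	if current < initialDelay {
		return initialDelay
	}
	if doubled := 2 * current; doubled < maxDelay {
		return doubled
	}
	return maxDelay
}

func main() {
	var d time.Duration
	for i := 0; i < 8; i++ {
		d = nextBackoff(d)
		fmt.Println(d) // 500ms 1s 2s 4s 8s 16s 32s 1m4s
	}
}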
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.496272 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 16:30:53.572694409 +0000 UTC
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.537409 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.537444 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.537456 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.537472 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.537481 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:10Z","lastTransitionTime":"2026-01-28T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.587038 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:10 crc kubenswrapper[4959]: E0128 15:18:10.587173 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.587248 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:10 crc kubenswrapper[4959]: E0128 15:18:10.587406 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.605918 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.617955 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.630544 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.639627 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.639732 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.639754 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.639770 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.639780 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:10Z","lastTransitionTime":"2026-01-28T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.642395 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.654919 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.668282 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.686957 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647
e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.699513 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.712181 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.722403 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.737935 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.741628 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.741660 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.741669 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.741682 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.741691 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:10Z","lastTransitionTime":"2026-01-28T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.754387 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.766414 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTi
me\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.781816 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\
\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"
terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.794234 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.809422 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.823772 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.838423 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:10Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.843930 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.843972 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.843980 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.843997 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.844006 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:10Z","lastTransitionTime":"2026-01-28T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.946775 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.947141 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.947158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.947180 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:10 crc kubenswrapper[4959]: I0128 15:18:10.947190 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:10Z","lastTransitionTime":"2026-01-28T15:18:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.048885 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.048917 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.048928 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.048943 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.048956 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.151253 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.151344 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.151358 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.151376 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.151387 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.253508 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.253544 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.253553 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.253566 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.253576 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.356355 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.356395 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.356407 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.356426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.356441 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.459685 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.459722 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.459738 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.459756 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.459767 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.496834 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 02:07:35.493742398 +0000 UTC Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.561616 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.561683 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.561692 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.561705 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.561716 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.586977 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.587032 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:11 crc kubenswrapper[4959]: E0128 15:18:11.587158 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:11 crc kubenswrapper[4959]: E0128 15:18:11.587271 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.664055 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.664140 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.664152 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.664177 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.664192 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.766362 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.766399 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.766409 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.766448 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.766459 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.868650 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.868707 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.868717 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.868738 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.868758 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.964822 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/0.log" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.964880 4959 generic.go:334] "Generic (PLEG): container finished" podID="1c1dca0a-c782-43f9-9390-7dc9c5311b97" containerID="17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826" exitCode=1 Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.964914 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bbjnj" event={"ID":"1c1dca0a-c782-43f9-9390-7dc9c5311b97","Type":"ContainerDied","Data":"17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.965355 4959 scope.go:117] "RemoveContainer" containerID="17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.974298 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.974352 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.974365 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.974385 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.974397 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:11Z","lastTransitionTime":"2026-01-28T15:18:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.983635 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"2026-01-28T15:17:26+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3\\\\n2026-01-28T15:17:26+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3 to /host/opt/cni/bin/\\\\n2026-01-28T15:17:26Z [verbose] multus-daemon started\\\\n2026-01-28T15:17:26Z [verbose] Readiness Indicator file check\\\\n2026-01-28T15:18:11Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:11Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:11 crc kubenswrapper[4959]: I0128 15:18:11.993398 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:11Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.007809 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.019254 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.031580 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.043078 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.053730 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.070570 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894
514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.076389 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.076426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.076440 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.076468 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.076487 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.081626 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.094009 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.106817 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.116393 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.126800 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.138769 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.149666 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.160352 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.168757 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.177434 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.178729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.178757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.178766 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.178781 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.178791 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.281159 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.281372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.281448 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.281529 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.281594 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.383476 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.383514 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.383525 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.383540 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.383551 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.486130 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.486183 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.486196 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.486221 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.486233 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.497233 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 11:58:51.707799763 +0000 UTC Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.587281 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.587323 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:12 crc kubenswrapper[4959]: E0128 15:18:12.587500 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:12 crc kubenswrapper[4959]: E0128 15:18:12.587560 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.588522 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.588552 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.588563 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.588580 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.588591 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.690749 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.690778 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.690786 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.690800 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.690809 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.794409 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.794448 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.794459 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.794472 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.794482 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.896295 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.896343 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.896353 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.896366 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.896376 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.970158 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/0.log" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.970215 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bbjnj" event={"ID":"1c1dca0a-c782-43f9-9390-7dc9c5311b97","Type":"ContainerStarted","Data":"62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d"} Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.984566 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\
\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.996129 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:12Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.998609 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.998642 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.998654 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.998671 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:12 crc kubenswrapper[4959]: I0128 15:18:12.998681 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:12Z","lastTransitionTime":"2026-01-28T15:18:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.010330 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.020933 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.033060 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.044925 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.063365 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.074563 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.086530 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.097813 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.100520 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.100556 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.100565 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.100579 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.100591 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.108544 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.118744 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"2026-01-28T15:17:26+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3\\\\n2026-01-28T15:17:26+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3 to /host/opt/cni/bin/\\\\n2026-01-28T15:17:26Z [verbose] multus-daemon started\\\\n2026-01-28T15:17:26Z [verbose] Readiness Indicator file check\\\\n2026-01-28T15:18:11Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:18:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.127546 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.139287 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.151589 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.161880 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.173008 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.193623 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a6
05d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:13Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.202920 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.202943 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.202952 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.202965 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.203001 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.305266 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.305306 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.305316 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.305329 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.305339 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.407515 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.407555 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.407568 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.407582 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.407590 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.498164 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 09:20:00.239590652 +0000 UTC Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.509671 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.509743 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.509759 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.509776 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.509788 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.586856 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.586987 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:13 crc kubenswrapper[4959]: E0128 15:18:13.587203 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:13 crc kubenswrapper[4959]: E0128 15:18:13.587446 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.612543 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.612592 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.612604 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.612622 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.612634 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.714948 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.714986 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.714996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.715013 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.715024 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.817483 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.817738 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.817825 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.817918 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.818022 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.920369 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.920426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.920436 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.920462 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:13 crc kubenswrapper[4959]: I0128 15:18:13.920472 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:13Z","lastTransitionTime":"2026-01-28T15:18:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.022974 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.023280 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.023384 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.023465 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.023553 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.126180 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.126213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.126221 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.126234 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.126247 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.228653 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.228695 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.228706 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.228722 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.228733 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.331361 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.331400 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.331413 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.331428 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.331441 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.433932 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.433963 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.433977 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.433992 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.434004 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.498757 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 07:47:42.066449224 +0000 UTC Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.536284 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.536316 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.536324 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.536337 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.536345 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.586940 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.587009 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:14 crc kubenswrapper[4959]: E0128 15:18:14.587064 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:14 crc kubenswrapper[4959]: E0128 15:18:14.587156 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.638058 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.638288 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.638373 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.638441 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.638501 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.740222 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.740264 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.740273 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.740286 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.740294 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.844180 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.844223 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.844233 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.844250 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.844262 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.946948 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.946989 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.946998 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.947012 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:14 crc kubenswrapper[4959]: I0128 15:18:14.947021 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:14Z","lastTransitionTime":"2026-01-28T15:18:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.049013 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.049050 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.049060 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.049077 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.049087 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.150965 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.151005 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.151016 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.151031 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.151043 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.253316 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.253357 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.253368 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.253386 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.253400 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.355971 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.356003 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.356011 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.356024 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.356034 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.458388 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.458432 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.458445 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.458462 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.458476 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.500806 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 04:41:37.917188973 +0000 UTC Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.561789 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.561821 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.561832 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.561849 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.561857 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.586146 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.586222 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:15 crc kubenswrapper[4959]: E0128 15:18:15.586281 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:15 crc kubenswrapper[4959]: E0128 15:18:15.586392 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.664768 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.664802 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.664813 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.664827 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.664837 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.766411 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.766458 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.766471 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.766489 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.766502 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.868637 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.868675 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.868684 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.868703 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.868712 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.970998 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.971069 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.971083 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.971124 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:15 crc kubenswrapper[4959]: I0128 15:18:15.971143 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:15Z","lastTransitionTime":"2026-01-28T15:18:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.074041 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.074083 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.074093 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.074128 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.074143 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.175900 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.175941 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.175951 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.175965 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.175974 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.278631 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.278673 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.278684 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.278698 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.278708 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.381017 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.381055 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.381077 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.381095 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.381120 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.482960 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.483001 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.483011 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.483028 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.483038 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.500940 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 23:49:14.513589809 +0000 UTC Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.585204 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.585267 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.585280 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.585304 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.585318 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.586447 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:16 crc kubenswrapper[4959]: E0128 15:18:16.586561 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.586713 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:16 crc kubenswrapper[4959]: E0128 15:18:16.586800 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.688090 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.688147 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.688156 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.688169 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.688179 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.790725 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.790756 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.790766 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.790779 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.790789 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.893034 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.893101 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.893135 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.893154 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.893164 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.995520 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.995571 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.995582 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.995600 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:16 crc kubenswrapper[4959]: I0128 15:18:16.995612 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:16Z","lastTransitionTime":"2026-01-28T15:18:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.097424 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.097474 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.097482 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.097499 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.097509 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.199671 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.199729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.199742 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.199760 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.199776 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.302292 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.302326 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.302335 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.302350 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.302359 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.405050 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.405135 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.405144 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.405160 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.405169 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.502125 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 14:28:39.878304354 +0000 UTC Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.507864 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.507893 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.507907 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.507922 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.507931 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.586748 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.586782 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:17 crc kubenswrapper[4959]: E0128 15:18:17.586880 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:17 crc kubenswrapper[4959]: E0128 15:18:17.587031 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.613807 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.613851 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.613860 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.613874 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.613883 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.716625 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.716698 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.716717 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.716747 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.716767 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.735983 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.736062 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.736085 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.736148 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.736175 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: E0128 15:18:17.748227 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:17Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.751750 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.751790 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.751803 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.751820 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.751834 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: E0128 15:18:17.767880 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:17Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.771098 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.771133 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.771142 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.771157 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.771167 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: E0128 15:18:17.782174 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:17Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.785447 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.785535 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
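Every failed patch above shares one root cause: the admission webhook endpoint at https://127.0.0.1:9743 serves a certificate whose NotAfter (2025-08-24T17:21:41Z) is months behind the node clock (2026-01-28T15:18:17Z). A minimal Go sketch for confirming this from the node follows; the file name is illustrative, and InsecureSkipVerify is used only so the handshake survives long enough to inspect the already-expired leaf certificate.

// checkcert.go: dial a TLS endpoint and report certificate validity.
// Hypothetical diagnostic; not part of the kubelet or OpenShift tooling.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Skip chain verification so an expired certificate can still be read.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	leaf := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("subject: %s\nnotBefore: %s\nnotAfter: %s\n", leaf.Subject, leaf.NotBefore, leaf.NotAfter)
	if now.After(leaf.NotAfter) {
		// Same condition the kubelet reports: "certificate has expired or is not yet valid".
		fmt.Printf("EXPIRED: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), leaf.NotAfter.UTC().Format(time.RFC3339))
	}
}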
event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.785556 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.785589 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.785609 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: E0128 15:18:17.798731 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:17Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.801699 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.801741 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
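The payload the kubelet keeps failing to send is a strategic merge patch against the Node's status subresource; the "$setElementOrder/conditions" directive visible in it pins the merge order of the conditions list. A sketch of issuing the same kind of patch with client-go follows; it assumes a recent client-go in go.mod, a reachable kubeconfig at the default home path, and the node name "crc" from this log.

// patchnode.go: sketch of patching a Node's Ready condition via a
// strategic merge patch on the status subresource, as the kubelet does.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// $setElementOrder/conditions fixes the merged order of the conditions
	// list, exactly as in the failed payloads above (trimmed to one entry).
	patch := []byte(`{"status":{"$setElementOrder/conditions":[{"type":"Ready"}],` +
		`"conditions":[{"type":"Ready","status":"False","reason":"KubeletNotReady"}]}}`)

	_, err = client.CoreV1().Nodes().Patch(context.TODO(), "crc",
		types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
	fmt.Println("patch result:", err)
}

On this cluster the call would fail the same way the log shows: the API server forwards the mutating request to the node.network-node-identity.openshift.io webhook, whose expired serving certificate aborts the admission chain.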
event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.801753 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.801771 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.801784 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: E0128 15:18:17.814065 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:17Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:17 crc kubenswrapper[4959]: E0128 15:18:17.814199 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.818684 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
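The "update node status exceeds retry count" line closes the sequence: the kubelet attempts the status patch a fixed number of times per sync (the upstream constant nodeStatusUpdateRetry is 5, matching the five failed attempts above) and then gives up until the next interval. A simplified sketch of that bounded-retry shape, loosely modeled on the kubelet's updateNodeStatus/tryUpdateNodeStatus and using a stand-in for the real patch call:

// retry.go: the bounded retry shape behind "update node status exceeds
// retry count". tryUpdateNodeStatus is a stand-in for the real patch call.
package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the upstream kubelet constant (5).
const nodeStatusUpdateRetry = 5

func tryUpdateNodeStatus(attempt int) error {
	// Stand-in: the real call PATCHes the Node status and, on this node,
	// fails because the admission webhook's certificate is expired.
	return errors.New("failed calling webhook: x509: certificate has expired")
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(i); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return fmt.Errorf("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}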
event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.818716 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.818724 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.818754 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.818765 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.921020 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.921059 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.921067 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.921081 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:17 crc kubenswrapper[4959]: I0128 15:18:17.921093 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:17Z","lastTransitionTime":"2026-01-28T15:18:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.023392 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.023431 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.023442 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.023457 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.023468 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.125491 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.125548 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.125562 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.125579 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.125597 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.227637 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.227677 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.227686 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.227701 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.227713 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.330043 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.330084 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.330092 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.330118 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.330129 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.432727 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.432784 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.432795 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.432813 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.432829 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.502665 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 08:49:17.948481864 +0000 UTC Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.536006 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.536059 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.536067 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.536081 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.536091 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.587225 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.587225 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:18 crc kubenswrapper[4959]: E0128 15:18:18.587482 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:18 crc kubenswrapper[4959]: E0128 15:18:18.587605 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.639626 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.639697 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.639715 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.639733 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.639744 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.742947 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.743015 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.743033 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.743060 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.743082 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.846142 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.846182 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.846194 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.846216 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.846227 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.949327 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.949372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.949382 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.949395 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:18 crc kubenswrapper[4959]: I0128 15:18:18.949406 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:18Z","lastTransitionTime":"2026-01-28T15:18:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.051569 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.051606 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.051618 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.051632 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.051645 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.154135 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.154200 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.154214 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.154231 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.154249 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.256578 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.256611 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.256619 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.256631 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.256640 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.359234 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.359262 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.359271 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.359283 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.359309 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.461985 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.462035 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.462046 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.462063 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.462075 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.502832 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 04:40:15.551004421 +0000 UTC Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.564485 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.564532 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.564544 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.564561 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.564574 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.586485 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.586651 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:19 crc kubenswrapper[4959]: E0128 15:18:19.586778 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:19 crc kubenswrapper[4959]: E0128 15:18:19.586864 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.666493 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.666536 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.666547 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.666561 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.666570 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.769714 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.769780 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.769792 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.769808 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.769820 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.872086 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.872161 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.872171 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.872184 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.872211 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.974185 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.974291 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.974303 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.974318 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:19 crc kubenswrapper[4959]: I0128 15:18:19.974330 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:19Z","lastTransitionTime":"2026-01-28T15:18:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.076439 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.076489 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.076501 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.076517 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.076529 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.179030 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.179279 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.179585 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.179601 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.179611 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.281315 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.281350 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.281358 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.281372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.281382 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.383514 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.383558 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.383570 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.383585 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.383596 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.485918 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.485969 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.485985 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.486003 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.486014 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.503369 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 23:07:43.430461469 +0000 UTC Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.586793 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.586976 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:20 crc kubenswrapper[4959]: E0128 15:18:20.587213 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:20 crc kubenswrapper[4959]: E0128 15:18:20.587347 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.587445 4959 scope.go:117] "RemoveContainer" containerID="d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.588498 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.588520 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.588529 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.588543 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.588552 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.600235 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee60627
0434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.614647 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.637023 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\
\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0
,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.651728 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\
\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.664837 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.679521 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.690887 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.690930 4959 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.690941 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.690956 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.690966 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.691553 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.706165 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"2026-01-28T15:17:26+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3\\\\n2026-01-28T15:17:26+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3 to /host/opt/cni/bin/\\\\n2026-01-28T15:17:26Z [verbose] multus-daemon started\\\\n2026-01-28T15:17:26Z [verbose] Readiness Indicator file check\\\\n2026-01-28T15:18:11Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:18:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.719344 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.733729 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.746041 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.758028 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.769163 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.786522 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a6
05d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.793423 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.793464 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.793476 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.793491 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.793502 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.801965 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.812257 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.824046 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.835423 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:20Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.896165 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.896198 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.896209 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.896225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.896236 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.995854 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/2.log" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.997360 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.997396 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.997406 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.997425 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.997436 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:20Z","lastTransitionTime":"2026-01-28T15:18:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.999145 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} Jan 28 15:18:20 crc kubenswrapper[4959]: I0128 15:18:20.999900 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.015379 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.031932 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.050045 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.068375 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.085678 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.101836 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.101887 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.101898 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.101919 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.101934 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.107683 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"2026-01-28T15:17:26+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3\\\\n2026-01-28T15:17:26+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3 to /host/opt/cni/bin/\\\\n2026-01-28T15:17:26Z [verbose] multus-daemon started\\\\n2026-01-28T15:17:26Z [verbose] Readiness Indicator file check\\\\n2026-01-28T15:18:11Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:18:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.127178 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.143922 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.159148 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.173564 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.196995 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.204774 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.204811 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.204818 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.204833 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.204847 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.221891 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c636
4cf0daa5ea3544c4522886b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:18:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.235413 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.251324 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.266978 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/e
tc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.282560 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.298953 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.313168 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.313223 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.313237 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.313259 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.313274 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.316138 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.416598 4959 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.416648 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.416660 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.416705 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.416717 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.504178 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 07:22:09.325082366 +0000 UTC Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.519163 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.519204 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.519213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.519228 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.519241 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.586568 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.586587 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:21 crc kubenswrapper[4959]: E0128 15:18:21.586954 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:21 crc kubenswrapper[4959]: E0128 15:18:21.587058 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.599061 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.621935 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.621980 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.621989 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.622004 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.622015 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.724895 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.724941 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.724953 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.724971 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.724985 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.827777 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.827833 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.827849 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.827871 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.827888 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.931252 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.931309 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.931320 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.931340 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:21 crc kubenswrapper[4959]: I0128 15:18:21.931354 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:21Z","lastTransitionTime":"2026-01-28T15:18:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.004993 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/3.log" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.005954 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/2.log" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.009021 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" exitCode=1 Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.009149 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.009239 4959 scope.go:117] "RemoveContainer" containerID="d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.010081 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:18:22 crc kubenswrapper[4959]: E0128 15:18:22.010297 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.034641 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.034688 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.034699 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.034715 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.034727 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.034673 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.051320 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.063948 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.076557 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.088776 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.103344 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"2026-01-28T15:17:26+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3\\\\n2026-01-28T15:17:26+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3 to /host/opt/cni/bin/\\\\n2026-01-28T15:17:26Z [verbose] multus-daemon started\\\\n2026-01-28T15:17:26Z [verbose] Readiness Indicator file check\\\\n2026-01-28T15:18:11Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:18:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.116051 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.133204 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.137768 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.137816 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:22 crc 
kubenswrapper[4959]: I0128 15:18:22.137832 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.137850 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.137861 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.146584 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:1
7:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.166148 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200c01ea-5be9-4295-b8c7-19cd0979314c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748fbba6db4ab0b5e79078734446f3a00090bebae2f4591ba9f1bc17e6bb8a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06b4ee0080e1c65f8310ac1207a98c59b15714c666839a390da6f8e4555563e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":t
rue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc2b61c48cd69c4512e49fc0ec433dfcffd9a50bd7fe6fa5aac2693a55ac1f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8e42ad11b473d9e0eb5d2a6c15fa6425b3f0980bc463845723402abef1d616c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d0e73acc6693630ed8b0b44cbd4728f5f634d0c8eb2919b4b608c2167905b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o:
//0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.182462 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.201291 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.212722 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.228914 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c636
4cf0daa5ea3544c4522886b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d15df1b27027fcb072c4707894e003d968072894514e8159647a7b4eef9e6691\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"message\\\":\\\"t-network-console/networking-console-plugin_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0128 15:17:51.709495 6648 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf in node crc\\\\nI0128 15:17:51.709466 6648 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nF0128 15:17:51.709606 6648 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:21Z\\\",\\\"message\\\":\\\"t-network-operator/iptables-alerter-4ln5h after 0 failed attempt(s)\\\\nI0128 15:18:21.508499 7063 default_network_controller.go:776] Recording success event on pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 15:18:21.508329 7063 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-controllers for network=default are: map[]\\\\nF0128 15:18:21.508507 7063 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired 
or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z]\\\\nI0128 15:18:21.508502 7063 services_controller.go:451] Built service openshift-kube-storage-version-migrator-operator/metrics cluster-wide LB for netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:18:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.239946 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.239982 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.239992 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.240007 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.240021 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.240545 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.253684 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.265587 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.277458 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.288719 4959 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:22Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.342912 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.342952 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.342961 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.343001 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.343013 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.445439 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.445470 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.445477 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.445489 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.445497 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.504384 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 01:28:07.625713744 +0000 UTC
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.548471 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.548528 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.548539 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.548555 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.548569 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.586243 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.586313 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:22 crc kubenswrapper[4959]: E0128 15:18:22.586396 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:22 crc kubenswrapper[4959]: E0128 15:18:22.586558 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.650767 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.650794 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.650803 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.650818 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.650828 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.753807 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.753878 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.753898 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.753929 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.753951 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.857626 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.857680 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.857694 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.857712 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.857723 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.961082 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.961173 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.961185 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.961226 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:22 crc kubenswrapper[4959]: I0128 15:18:22.961240 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:22Z","lastTransitionTime":"2026-01-28T15:18:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.015327 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/3.log"
Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.021151 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"
Jan 28 15:18:23 crc kubenswrapper[4959]: E0128 15:18:23.021320 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8"
Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.033042 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is
not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.044398 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"na
me\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.056984 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.065705 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.065892 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.065961 4959 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.066059 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.066144 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:23Z","lastTransitionTime":"2026-01-28T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.069928 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.084369 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.108380 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:21Z\\\",\\\"message\\\":\\\"t-network-operator/iptables-alerter-4ln5h after 0 failed attempt(s)\\\\nI0128 15:18:21.508499 7063 default_network_controller.go:776] Recording success event on pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 15:18:21.508329 7063 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-controllers for network=default are: map[]\\\\nF0128 15:18:21.508507 7063 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z]\\\\nI0128 15:18:21.508502 7063 services_controller.go:451] Built service openshift-kube-storage-version-migrator-operator/metrics cluster-wide LB for netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:18:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.128296 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.141991 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.155314 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.166634 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.168438 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.168468 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.168482 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.168505 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.168520 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:23Z","lastTransitionTime":"2026-01-28T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.181669 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.193778 4959 
status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.209238 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.223987 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.250688 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200c01ea-5be9-4295-b8c7-19cd0979314c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748fbba6db4ab0b5e79078734446f3a00090bebae2f4591ba9f1bc17e6bb8a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06b4ee0080e1c65f8310ac1207a98c59b15714c666839a390da6f8e4555563e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc2b61c48cd69c4512e49fc0ec433dfcffd9a50bd7fe6fa5aac2693a55ac1f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8e42ad11b473d9e0eb5d2a6c15fa6425b3f0980bc463845723402abef1d616c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d0e73acc6693630ed8b0b44cbd4728f5f634d0c8eb2919b4b608c2167905b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.268233 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.271441 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.271527 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.271553 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.271585 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.271606 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:23Z","lastTransitionTime":"2026-01-28T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.287603 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.302650 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.318221 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"2026-01-28T15:17:26+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3\\\\n2026-01-28T15:17:26+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3 to 
/host/opt/cni/bin/\\\\n2026-01-28T15:17:26Z [verbose] multus-daemon started\\\\n2026-01-28T15:17:26Z [verbose] Readiness Indicator file check\\\\n2026-01-28T15:18:11Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:18:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:23Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.375012 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.375063 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.375075 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.375094 4959 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.375126 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:23Z","lastTransitionTime":"2026-01-28T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.479151 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.479206 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.479220 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.479243 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.479257 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:23Z","lastTransitionTime":"2026-01-28T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.505584 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 04:18:07.496121099 +0000 UTC Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.582135 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.582182 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.582191 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.582208 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.582221 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:23Z","lastTransitionTime":"2026-01-28T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.586845 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:23 crc kubenswrapper[4959]: E0128 15:18:23.586977 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.587263 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:23 crc kubenswrapper[4959]: E0128 15:18:23.587476 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.685412 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.685479 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.685492 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.685509 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:23 crc kubenswrapper[4959]: I0128 15:18:23.685520 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:23Z","lastTransitionTime":"2026-01-28T15:18:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} [... the same five-line block - NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID / NodeNotReady events plus the "Node became not ready" (KubeletNotReady, no CNI configuration file in /etc/kubernetes/cni/net.d/) condition - repeats verbatim at 15:18:23.789, 15:18:23.891, 15:18:23.994, 15:18:24.098, 15:18:24.202 and 15:18:24.306, with only the timestamps advancing; a sketch of the readiness check behind the message follows, and the tail of the 15:18:24.306 record resumes after it ...]
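The repeated KubeletNotReady condition persists until the container runtime reports NetworkReady=true, which in turn waits for a CNI network config to appear. A rough Go sketch of what that directory check amounts to; the path is the one named in the message, while the accepted extensions are an assumption for illustration:

// cnicheck.go - sketch of the check behind "no CNI configuration file in
// /etc/kubernetes/cni/net.d/": the network stays unready until at least one
// CNI config file exists in the configured directory.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		// extension list assumed for illustration
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	fmt.Printf("cni config present: %v (err: %v)\n", ok, err)
}

Until ovn-kubernetes writes its config there, this returns false and the node condition above keeps being re-recorded on every sync.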
Has your network provider started?"} Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.341600 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.341815 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.341896 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.341844531 +0000 UTC m=+151.787750944 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.342057 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342162 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342207 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342237 4959 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342276 4959 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.342166 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: 
\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342346 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.342309503 +0000 UTC m=+151.788216086 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342396 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.342375535 +0000 UTC m=+151.788282128 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.342441 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342578 4959 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342573 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342653 4959 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342680 4959 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342741 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-28 15:19:28.342716683 +0000 UTC m=+151.788623276 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.342785 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.342766334 +0000 UTC m=+151.788672997 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.410148 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.410179 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.410189 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.410205 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.410216 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:24Z","lastTransitionTime":"2026-01-28T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
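The volume errors above are retried with an exponentially growing delay; "durationBeforeRetry 1m4s" is consistent with a delay that starts at 500ms and doubles per consecutive failure (0.5s doubled seven times is 64s). A sketch with those assumed constants:

// backoff.go - sketch of a doubling retry backoff for failed volume
// operations. Initial delay, factor and cap are assumptions; 1m4s in the log
// matches the delay after seven doublings of 500ms.
package main

import (
	"fmt"
	"time"
)

func backoffDelay(initial time.Duration, factor float64, maxDelay time.Duration, doublings int) time.Duration {
	d := initial
	for i := 0; i < doublings; i++ {
		d = time.Duration(float64(d) * factor)
		if d > maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for n := 0; n <= 8; n++ {
		fmt.Printf("after %d doublings -> wait %s\n", n, backoffDelay(500*time.Millisecond, 2, 2*time.Minute+2*time.Second, n))
	}
}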
Has your network provider started?"} Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.506003 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 13:08:54.702583542 +0000 UTC Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.513353 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.513421 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.513432 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.513454 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.513472 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:24Z","lastTransitionTime":"2026-01-28T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.586096 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.586365 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.586420 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:24 crc kubenswrapper[4959]: E0128 15:18:24.586607 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.615921 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.615958 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.615966 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.615980 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.615989 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:24Z","lastTransitionTime":"2026-01-28T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.717932 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.717971 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.717979 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.717992 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:24 crc kubenswrapper[4959]: I0128 15:18:24.718001 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:24Z","lastTransitionTime":"2026-01-28T15:18:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} [... the same node-event / "Node became not ready" block repeats verbatim at 15:18:24.821, 15:18:24.923, 15:18:25.025, 15:18:25.127, 15:18:25.230 and 15:18:25.332, differing only in timestamps; a sketch of the multus readiness-indicator wait that failed earlier follows, and the tail of the 15:18:25.332 record resumes after it ...]
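For the multus restart recorded earlier (exit at 15:18:11 in the multus-bbjnj status), the failure mode is the same missing default-network config: multus polls for a readiness-indicator file and exits when the poll times out ("pollimmediate error: timed out waiting for the condition"). A sketch of such a poll-until-file-exists wait, using the file path from the log; the interval and timeout values are assumptions:

// readiness.go - sketch of a poll-until-file-exists wait like the multus
// readiness-indicator check that timed out above.
package main

import (
	"errors"
	"fmt"
	"os"
	"time"
)

func waitForFile(path string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if _, err := os.Stat(path); err == nil {
			return nil // indicator file present: default network is ready
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	err := waitForFile("/host/run/multus/cni/net.d/10-ovn-kubernetes.conf", time.Second, 45*time.Second)
	fmt.Println("ready:", err == nil, "err:", err)
}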
Has your network provider started?"} Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.436415 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.436470 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.436483 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.436501 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.436512 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:25Z","lastTransitionTime":"2026-01-28T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.506676 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 18:01:31.498690844 +0000 UTC Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.538628 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.538673 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.538682 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.538698 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.538709 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:25Z","lastTransitionTime":"2026-01-28T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.586310 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.586363 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:25 crc kubenswrapper[4959]: E0128 15:18:25.586428 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:25 crc kubenswrapper[4959]: E0128 15:18:25.586511 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.641452 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.641517 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.641539 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.641566 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.641585 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:25Z","lastTransitionTime":"2026-01-28T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.744512 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.744564 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.744577 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.744595 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.744610 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:25Z","lastTransitionTime":"2026-01-28T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.848274 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.848313 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.848325 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.848340 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.848352 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:25Z","lastTransitionTime":"2026-01-28T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.951369 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.951440 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.951464 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.951503 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:25 crc kubenswrapper[4959]: I0128 15:18:25.951529 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:25Z","lastTransitionTime":"2026-01-28T15:18:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.054650 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.054686 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.054695 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.054711 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.054720 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.158425 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.158479 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.158494 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.158513 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.158523 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.260916 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.260967 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.260977 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.260990 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.260999 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.363369 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.363403 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.363412 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.363425 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.363435 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.465712 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.465757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.465767 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.465781 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.465794 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.507124 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 10:18:15.050083705 +0000 UTC Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.567500 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.567550 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.567562 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.567580 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.567591 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.586871 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.587010 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:26 crc kubenswrapper[4959]: E0128 15:18:26.587047 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:26 crc kubenswrapper[4959]: E0128 15:18:26.587279 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.671187 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.671240 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.671272 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.671352 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.671394 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.775138 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.775213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.775238 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.775276 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.775301 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.878277 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.878348 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.878364 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.878391 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.878409 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.981629 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.981673 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.981683 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.981701 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:26 crc kubenswrapper[4959]: I0128 15:18:26.981712 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:26Z","lastTransitionTime":"2026-01-28T15:18:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.083980 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.084026 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.084037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.084054 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.084063 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.186857 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.186971 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.186982 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.186998 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.187009 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.290211 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.290260 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.290269 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.290285 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.290297 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.392588 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.392640 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.392649 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.392665 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.392674 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.494218 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.494264 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.494277 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.494292 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.494305 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.507849 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 23:45:33.646397426 +0000 UTC Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.586783 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.586819 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:27 crc kubenswrapper[4959]: E0128 15:18:27.586960 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:27 crc kubenswrapper[4959]: E0128 15:18:27.587032 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.596579 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.596628 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.596643 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.596663 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.596678 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.699168 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.699213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.699225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.699242 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.699254 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.801730 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.801778 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.801787 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.801802 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.801812 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.904659 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.904726 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.904740 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.904759 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.904773 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.952903 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.952945 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.952953 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.952967 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.952976 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: E0128 15:18:27.965322 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.969065 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.969137 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.969151 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.969166 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.969194 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: E0128 15:18:27.979958 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.983224 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.983269 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.983281 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.983299 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.983311 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:27 crc kubenswrapper[4959]: E0128 15:18:27.994045 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:27Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.997315 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.997351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
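The err payload quoted in these failures is a strategic merge patch against Node.status: the $setElementOrder/conditions directive pins the ordering of the conditions list, and each condition entry merges into the existing list by its "type" key. A trimmed sketch of building that JSON shape follows; field names are taken from the log, but the construction is illustrative, not the kubelet's actual code.

// patchshape.go: illustrative sketch of the strategic-merge-patch shape the
// kubelet sends for node status, trimmed to the conditions portion.
package main

import (
	"encoding/json"
	"fmt"
)

// condition mirrors the fields visible in the logged patch.
type condition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	Reason             string `json:"reason,omitempty"`
	Message            string `json:"message,omitempty"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime,omitempty"`
	LastTransitionTime string `json:"lastTransitionTime,omitempty"`
}

func main() {
	patch := map[string]any{
		"status": map[string]any{
			// Strategic-merge directive: keep this ordering of the merged list.
			"$setElementOrder/conditions": []map[string]string{
				{"type": "MemoryPressure"}, {"type": "DiskPressure"},
				{"type": "PIDPressure"}, {"type": "Ready"},
			},
			// Each entry merges into the server-side list by its "type" key.
			"conditions": []condition{{
				Type:              "Ready",
				Status:            "False",
				Reason:            "KubeletNotReady",
				Message:           "container runtime network not ready",
				LastHeartbeatTime: "2026-01-28T15:18:27Z",
			}},
		},
	}
	out, err := json.MarshalIndent(patch, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}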
event="NodeHasNoDiskPressure" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.997362 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.997380 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:27 crc kubenswrapper[4959]: I0128 15:18:27.997394 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:27Z","lastTransitionTime":"2026-01-28T15:18:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: E0128 15:18:28.008229 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.011602 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.011636 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
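Note the polarity of the conditions being patched: MemoryPressure, DiskPressure and PIDPressure are failure conditions, so status False means the node is healthy on all three, while Ready is the one positive condition, so its False status (reason KubeletNotReady) is what actually keeps the node NotReady here. A small sketch of that reading follows; the types, statuses and reasons are taken from the log, and the nodeReady helper is hypothetical.

// readiness.go: how the four conditions in the patch above are read.
package main

import "fmt"

type cond struct{ Type, Status, Reason string }

// nodeReady is a hypothetical helper: only the Ready condition decides
// readiness, and for it "True" is the healthy value.
func nodeReady(conds []cond) (bool, string) {
	for _, c := range conds {
		if c.Type == "Ready" {
			return c.Status == "True", c.Reason
		}
	}
	return false, "ReadyConditionMissing"
}

func main() {
	conds := []cond{
		{"MemoryPressure", "False", "KubeletHasSufficientMemory"}, // False = healthy
		{"DiskPressure", "False", "KubeletHasNoDiskPressure"},     // False = healthy
		{"PIDPressure", "False", "KubeletHasSufficientPID"},       // False = healthy
		{"Ready", "False", "KubeletNotReady"},                     // False = NOT ready
	}
	ready, reason := nodeReady(conds)
	fmt.Printf("ready=%v reason=%s\n", ready, reason) // ready=false reason=KubeletNotReady
}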
event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.011644 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.011657 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.011669 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: E0128 15:18:28.023405 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:28Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:28 crc kubenswrapper[4959]: E0128 15:18:28.023573 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.024868 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
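After a fixed number of consecutive patch failures the kubelet abandons the attempt, logs "update node status exceeds retry count" as above, and waits for the next sync period, which is why the same payload reappears moments later. A sketch of that bounded-retry pattern follows; the constant name mirrors the kubelet's nodeStatusUpdateRetry, and the value 5 matches current upstream sources but is an assumption here.

// retryloop.go: bounded-retry pattern behind the "exceeds retry count" line.
package main

import (
	"errors"
	"fmt"
)

// Mirrors the kubelet's nodeStatusUpdateRetry; the value is an assumption.
const nodeStatusUpdateRetry = 5

// tryUpdateNodeStatus stands in for the PATCH that the expired webhook
// certificate keeps rejecting.
func tryUpdateNodeStatus() error {
	return errors.New("failed calling webhook: x509: certificate has expired or is not yet valid")
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	fmt.Println(updateNodeStatus())
}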
event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.024900 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.024911 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.024928 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.024939 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.127433 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.127756 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.127765 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.127780 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.127792 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.229664 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.229702 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.229713 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.229729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.229739 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.332125 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.332158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.332168 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.332188 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.332204 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.435539 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.435600 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.435611 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.435625 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.435635 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.508918 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 07:37:34.698868354 +0000 UTC
Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.538143 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.538201 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.538219 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.538277 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.538295 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.586704 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.586769 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:28 crc kubenswrapper[4959]: E0128 15:18:28.586885 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
Jan 28 15:18:28 crc kubenswrapper[4959]: E0128 15:18:28.587087 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
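The pod_workers errors are a downstream symptom of the same NetworkReady=false condition: with no CNI configuration file in /etc/kubernetes/cni/net.d/, the runtime cannot wire up pod networking, so sandboxes for non-host-network pods such as network-metrics-daemon-4d9tj are never started. The sketch below mirrors that readiness check; it is a simplified stand-in for real CNI config loading, with the file extensions following common libcni conventions.

// cnicheck.go: report whether any CNI network config exists in the
// directory named in the log.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the log
	var configs []string
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(dir, pattern))
		if err != nil {
			continue // Glob only errors on a malformed pattern
		}
		configs = append(configs, matches...)
	}
	if len(configs) == 0 {
		fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", dir)
		os.Exit(1)
	}
	fmt.Println("found CNI config(s):", configs)
}

On this node the check would exit non-zero until the network operator writes its config, at which point the Ready condition flips and sandbox creation can proceed.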
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.642260 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.642333 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.642351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.642375 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.642397 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.745683 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.745721 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.745734 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.745750 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.745761 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.849503 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.849568 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.849581 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.849604 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.849619 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.954644 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.954707 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.954720 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.954743 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:28 crc kubenswrapper[4959]: I0128 15:18:28.954758 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:28Z","lastTransitionTime":"2026-01-28T15:18:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.058400 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.058450 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.058457 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.058474 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.058482 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.162413 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.162464 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.162474 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.162492 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.162507 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.265576 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.265643 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.265657 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.265681 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.265696 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.370578 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.370625 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.370636 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.370662 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.370675 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.474145 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.474210 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.474224 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.474243 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.474255 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.510132 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 23:56:58.249720346 +0000 UTC Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.577420 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.577458 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.577470 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.577485 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.577495 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.586858 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.586912 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:29 crc kubenswrapper[4959]: E0128 15:18:29.587051 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:29 crc kubenswrapper[4959]: E0128 15:18:29.587267 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.681249 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.681679 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.681864 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.682044 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.682252 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.786351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.786419 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.786442 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.786476 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.786516 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.890892 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.890942 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.890952 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.890971 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.890982 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.994727 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.994772 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.994781 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.994797 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:29 crc kubenswrapper[4959]: I0128 15:18:29.994806 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:29Z","lastTransitionTime":"2026-01-28T15:18:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.097187 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.097229 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.097241 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.097256 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.097267 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.200351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.200402 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.200416 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.200435 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.200450 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.302772 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.302848 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.302862 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.302878 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.302888 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.405021 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.405083 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.405173 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.405198 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.405211 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.508085 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.508155 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.508167 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.508183 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.508196 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.510440 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 11:25:16.253208487 +0000 UTC Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.587358 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.587365 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:30 crc kubenswrapper[4959]: E0128 15:18:30.587638 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:30 crc kubenswrapper[4959]: E0128 15:18:30.587780 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.601800 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.610923 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.610959 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.610969 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.610984 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.610997 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.617749 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.630670 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.644384 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"2026-01-28T15:17:26+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3\\\\n2026-01-28T15:17:26+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3 to 
/host/opt/cni/bin/\\\\n2026-01-28T15:17:26Z [verbose] multus-daemon started\\\\n2026-01-28T15:17:26Z [verbose] Readiness Indicator file check\\\\n2026-01-28T15:18:11Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:18:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.656622 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.675285 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.688209 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.710321 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200c01ea-5be9-4295-b8c7-19cd0979314c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748fbba6db4ab0b5e79078734446f3a00090bebae2f4591ba9f1bc17e6bb8a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06b4ee0080e1c65f8310ac1207a98c59b15714c666839a390da6f8e4555563e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc2b61c48cd69c4512e49fc0ec433dfcffd9a50bd7fe6fa5aac2693a55ac1f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8e42ad11b473d9e0eb5d2a6c15fa6425b3f0980bc463845723402abef1d616c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d0e73acc6693630ed8b0b44cbd4728f5f634d0c8eb2919b4b608c2167905b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.714075 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.714159 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.714175 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.714208 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.714223 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.729001 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.741538 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.752884 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.774636 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:21Z\\\",\\\"message\\\":\\\"t-network-operator/iptables-alerter-4ln5h after 0 failed attempt(s)\\\\nI0128 15:18:21.508499 7063 default_network_controller.go:776] Recording success event on pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 15:18:21.508329 7063 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-controllers for network=default are: map[]\\\\nF0128 15:18:21.508507 7063 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z]\\\\nI0128 15:18:21.508502 7063 services_controller.go:451] Built service openshift-kube-storage-version-migrator-operator/metrics cluster-wide LB for netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:18:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.792263 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.816593 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.816642 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.816653 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.816668 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.816680 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.829241 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.846796 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.860784 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.871725 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.884885 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\
\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.896811 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCo
de\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:30Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.920054 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.920094 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.920308 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.920345 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:30 crc kubenswrapper[4959]: I0128 15:18:30.920357 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:30Z","lastTransitionTime":"2026-01-28T15:18:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.023312 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.023361 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.023373 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.023389 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.023399 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.125848 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.125903 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.125913 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.125930 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.125939 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.229998 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.230077 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.230097 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.230152 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.230171 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.332812 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.332876 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.332888 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.332912 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.332929 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
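The NodeNotReady churn above is gated on one filesystem check: the container runtime reports NetworkReady=false until a CNI configuration file appears in the directory named in the message. A simplified stand-in for that probe; the real check lives in the runtime's CNI plugin manager, not in kubelet, and the accepted file extensions here are an assumption based on common CNI config loaders:

// cni_check.go - sketch of the readiness gate behind "NetworkPluginNotReady":
// if no CNI config file exists, the network (and hence the node) stays not ready.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory reported in the log above
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("NetworkReady=false: %v\n", err)
		return
	}
	for _, e := range entries {
		// Assumed extensions; typical CNI loaders accept these.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Printf("NetworkReady=true: found %s\n", e.Name())
			return
		}
	}
	fmt.Printf("NetworkReady=false: no CNI configuration file in %s\n", confDir)
}

On this node the directory stays empty because ovnkube-controller, which writes the config, is itself in CrashLoopBackOff on the expired webhook certificate, so the condition repeats every status sync.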
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.436305 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.436380 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.436392 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.436413 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.436427 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.511561 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 19:39:46.994548862 +0000 UTC
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.540620 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.540693 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.540709 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.540736 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.540752 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.587749 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.587840 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:31 crc kubenswrapper[4959]: E0128 15:18:31.588067 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:31 crc kubenswrapper[4959]: E0128 15:18:31.588081 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.643614 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.643704 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.643731 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.643768 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.643798 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.747282 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.747333 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.747342 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.747360 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.747370 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.850637 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.850691 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.850700 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.850714 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.850725 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.953395 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.953452 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.953462 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.953479 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:31 crc kubenswrapper[4959]: I0128 15:18:31.953490 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:31Z","lastTransitionTime":"2026-01-28T15:18:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.056619 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.056658 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.056671 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.056686 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.056696 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.158795 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.158828 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.158838 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.158851 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.158860 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.260883 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.260928 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.260938 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.260956 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.260968 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.363389 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.363430 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.363443 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.363458 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.363469 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.465509 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.465569 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.465583 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.465601 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.465615 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.512264 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 20:09:57.401193598 +0000 UTC
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.568269 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.568303 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.568312 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.568324 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.568335 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.587045 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.587184 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:32 crc kubenswrapper[4959]: E0128 15:18:32.587277 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:32 crc kubenswrapper[4959]: E0128 15:18:32.587402 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.670599 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.670652 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.670666 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.670684 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.670696 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.773780 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.773816 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.773826 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.773841 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.773865 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.876933 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.876978 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.876987 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.877008 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.877018 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.980372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.980455 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.980475 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.980511 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:32 crc kubenswrapper[4959]: I0128 15:18:32.980532 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:32Z","lastTransitionTime":"2026-01-28T15:18:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.083550 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.083617 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.083631 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.083649 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.083659 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.185984 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.186036 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.186050 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.186072 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.186084 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.287974 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.288012 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.288022 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.288036 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.288045 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.390521 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.390556 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.390566 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.390586 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.390604 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.493407 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.493493 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.493516 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.493553 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.493580 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.512568 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 05:02:27.624465577 +0000 UTC
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.587065 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.587190 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:33 crc kubenswrapper[4959]: E0128 15:18:33.587246 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:18:33 crc kubenswrapper[4959]: E0128 15:18:33.587499 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.596145 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.596228 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.596252 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.596280 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.596300 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.698947 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.699016 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.699026 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.699042 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.699076 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.801901 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.801955 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.801970 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.801989 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.802001 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.904471 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.904555 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.904568 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.904584 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:33 crc kubenswrapper[4959]: I0128 15:18:33.904596 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:33Z","lastTransitionTime":"2026-01-28T15:18:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.007480 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.007540 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.007552 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.007581 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.007595 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.109559 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.109591 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.109600 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.109614 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.109623 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.211897 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.211933 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.211941 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.211956 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.211976 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.314371 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.314408 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.314416 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.314428 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.314437 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.416883 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.416925 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.416937 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.416955 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.416968 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.513535 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 11:48:43.136785307 +0000 UTC
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.520533 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.520639 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.520665 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.520703 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.520726 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.586333 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.586434 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:34 crc kubenswrapper[4959]: E0128 15:18:34.586675 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:34 crc kubenswrapper[4959]: E0128 15:18:34.587023 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.624429 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.624503 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.624515 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.624538 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.624551 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.727866 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.727910 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.727919 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.727936 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.727947 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.832534 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.832616 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.832638 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.832668 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.832688 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.935698 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.935744 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.935757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.935773 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:34 crc kubenswrapper[4959]: I0128 15:18:34.935783 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:34Z","lastTransitionTime":"2026-01-28T15:18:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.039156 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.039228 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.039245 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.039272 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.039290 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.141799 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.141840 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.141848 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.141862 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.141872 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.245023 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.245074 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.245089 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.245133 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.245148 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.348359 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.348586 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.348703 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.348783 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.348847 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.452360 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.452416 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.452429 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.452456 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.452471 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.513922 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 23:18:07.088479157 +0000 UTC
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.556166 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.556207 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.556220 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.556237 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.556247 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.586601 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.586947 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:35 crc kubenswrapper[4959]: E0128 15:18:35.586967 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:35 crc kubenswrapper[4959]: E0128 15:18:35.587426 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.659891 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.659934 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.659946 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.659961 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.659975 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.763565 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.763877 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.763961 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.764035 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.764139 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.867310 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.867349 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.867358 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.867372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.867382 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.970147 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.970426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.970493 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.970578 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:35 crc kubenswrapper[4959]: I0128 15:18:35.970666 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:35Z","lastTransitionTime":"2026-01-28T15:18:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.073203 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.073996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.074094 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.074223 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.074307 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.177503 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.177578 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.177598 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.177630 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.177651 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.281807 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.281885 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.281910 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.281946 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.281972 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.385449 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.385547 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.385572 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.385603 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.385631 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
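[log annotation] The condition={...} payload in each setters.go:603 entry is a serialized core/v1 NodeCondition: on every status sync the kubelet rewrites the node's Ready condition with Status=False and Reason=KubeletNotReady until the runtime network becomes ready. A short sketch reconstructing that same condition with the upstream API types (assumes the k8s.io/api and k8s.io/apimachinery modules are on the module path; illustrative, not the kubelet's own code path):

// Builds the Ready=False condition shown in the setters.go entries above.
package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.Now()
	cond := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionFalse,
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	out, _ := json.Marshal(cond)
	fmt.Println(string(out)) // same shape as condition={...} in the log
}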
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.489949 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.490691 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.490718 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.490756 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.490782 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.514552 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 17:20:40.076478358 +0000 UTC
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.586268 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.586397 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:36 crc kubenswrapper[4959]: E0128 15:18:36.586686 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:36 crc kubenswrapper[4959]: E0128 15:18:36.586566 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.593994 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.594066 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.594087 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.594145 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.594166 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.698488 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.698555 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.698573 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.698606 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.698625 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.802766 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.802847 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.802866 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.802948 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.802971 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.905278 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.905313 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.905320 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.905331 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:36 crc kubenswrapper[4959]: I0128 15:18:36.905340 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:36Z","lastTransitionTime":"2026-01-28T15:18:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.007922 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.007982 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.007998 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.008024 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.008041 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.112457 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.112569 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.112595 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.112634 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.112657 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.220293 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.220370 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.220390 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.220418 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.220438 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.325864 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.325912 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.325926 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.325946 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.325958 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
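[log annotation] A few entries below, the kubelet's node-status PATCH starts failing outright ("Error updating node status, will retry"): the node.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24, five months before the log clock, so the TLS handshake aborts with "x509: certificate has expired or is not yet valid". The failing check is the standard validity-window comparison, sketched here in Go (the certificate path is hypothetical):

// certcheck.go — sketch: the NotBefore/NotAfter window check that fails
// in the webhook errors recorded below.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("server.crt") // hypothetical path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		// Same condition the TLS client reports in the log.
		fmt.Printf("certificate invalid: current time %s is outside [%s, %s]\n",
			now.UTC().Format(time.RFC3339),
			cert.NotBefore.UTC().Format(time.RFC3339),
			cert.NotAfter.UTC().Format(time.RFC3339))
		return
	}
	fmt.Println("certificate valid until", cert.NotAfter.UTC())
}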
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.429406 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.429481 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.429499 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.429528 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.429548 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.515326 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 09:46:56.119655832 +0000 UTC
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.532916 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.532968 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.532979 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.532996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.533008 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.586497 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.586575 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:37 crc kubenswrapper[4959]: E0128 15:18:37.586932 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:18:37 crc kubenswrapper[4959]: E0128 15:18:37.587160 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.634838 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.634868 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.634877 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.634892 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.634903 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.737000 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.737025 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.737032 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.737045 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.737053 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.839666 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.839706 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.839717 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.839734 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.839747 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.942665 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.942698 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.942709 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.942726 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:37 crc kubenswrapper[4959]: I0128 15:18:37.942737 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:37Z","lastTransitionTime":"2026-01-28T15:18:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.045047 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.045137 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.045148 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.045164 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.045177 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.095249 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.095348 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.095356 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.095371 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.095382 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.116979 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.122460 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.122546 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.122564 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.122597 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.122618 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.137008 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.141799 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.141849 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.141862 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.141888 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.141906 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.162888 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.168288 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.168335 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.168350 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.168373 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.168388 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.186972 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.192485 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.192551 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.192561 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.192577 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.192587 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.209055 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:38Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.209244 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.211030 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
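Every status-update attempt in the run above fails the same way before the retry budget is exhausted (upstream kubelet caps this at five attempts via its nodeStatusUpdateRetry constant, and the full patch payload is shown once at the 15:18:38.162888 entry; the identical payloads of the later retries are elided): the API server must POST the node update to the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743/node, and that webhook's serving certificate expired on 2025-08-24, months before the current time of 2026-01-28. A minimal Go sketch of the validity-window check that crypto/x509 applies; the certificate path below is a placeholder, not taken from this log.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Placeholder path; the real serving-cert location for the
	// network-node-identity webhook is not recorded in this log.
	pemBytes, err := os.ReadFile("/path/to/serving-cert.pem")
	if err != nil {
		fmt.Println("read cert:", err)
		return
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Println("no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse cert:", err)
		return
	}
	now := time.Now()
	// TLS verification fails with "certificate has expired or is not
	// yet valid" whenever now falls outside [NotBefore, NotAfter]; in
	// the entries above NotAfter is 2025-08-24T17:21:41Z while the
	// clock reads 2026-01-28, so every webhook POST is rejected.
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("certificate invalid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339),
			cert.NotAfter.UTC().Format(time.RFC3339))
	} else {
		fmt.Println("certificate is within its validity window")
	}
}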
event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.211072 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.211084 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.211119 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.211132 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.313825 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.313868 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.313876 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.313890 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.313901 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.416476 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.416522 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.416538 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.416558 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.416576 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.516509 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 15:00:28.8428062 +0000 UTC Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.519975 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.520071 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.520146 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.520179 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.520204 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.586979 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.586979 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.587166 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.587230 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.587884 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:18:38 crc kubenswrapper[4959]: E0128 15:18:38.588024 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.623310 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.623360 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.623370 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.623387 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.623399 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.726711 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.726778 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.726791 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.726807 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.726820 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.830364 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.830416 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.830426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.830447 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.830459 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.932899 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.932948 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.932965 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.932982 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:38 crc kubenswrapper[4959]: I0128 15:18:38.932993 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:38Z","lastTransitionTime":"2026-01-28T15:18:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.036321 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.036369 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.036381 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.036400 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.036413 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.139009 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.139134 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.139153 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.139183 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.139205 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.241684 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.241721 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.241729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.241742 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.241752 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.344291 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.344334 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.344346 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.344361 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.344371 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.447588 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.447673 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.447697 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.447729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.447750 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.517457 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 10:47:52.108491897 +0000 UTC
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.550867 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.550916 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.550930 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.550947 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.550958 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.586651 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.586723 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:39 crc kubenswrapper[4959]: E0128 15:18:39.586780 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:18:39 crc kubenswrapper[4959]: E0128 15:18:39.586999 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.653697 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.653775 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.653793 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.653810 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.653820 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.756597 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.756645 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.756656 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.756675 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.756684 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.859721 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.859762 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.859776 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.859797 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.859812 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.962681 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.962747 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.962765 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.962792 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:39 crc kubenswrapper[4959]: I0128 15:18:39.962869 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:39Z","lastTransitionTime":"2026-01-28T15:18:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.065224 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.065260 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.065271 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.065287 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.065297 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.168206 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.168253 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.168269 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.168289 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.168303 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.270796 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.270868 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.270877 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.270891 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.270900 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.374391 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.374451 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.374463 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.374477 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.374486 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.478068 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.478127 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.478139 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.478157 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.478169 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.518550 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 00:16:10.63116492 +0000 UTC
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.581982 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.582043 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.582060 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.582092 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.582140 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.586239 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.586339 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:40 crc kubenswrapper[4959]: E0128 15:18:40.586416 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:40 crc kubenswrapper[4959]: E0128 15:18:40.586544 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.602948 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cd485f38-ca6f-4ee9-9428-cf7cf5335f94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a5d22ae537876bac6d29bd44859f0699afdbe44085cbf63a58e72bb5593e841\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fd6218f693d06ca3388719946b9cd7446cd03c31856d1375a2d19ee606270434\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.626055 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"200c01ea-5be9-4295-b8c7-19cd0979314c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748fbba6db4ab0b5e79078734446f3a00090bebae2f4591ba9f1bc17e6bb8a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06b4ee0080e1c65f8310ac1207a98c59b15714c666839a390da6f8e4555563e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc2b61c48cd69c4512e49fc0ec433dfcffd9a50bd7fe6fa5aac2693a55ac1f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee18
47b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8e42ad11b473d9e0eb5d2a6c15fa6425b3f0980bc463845723402abef1d616c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d0e73acc6693630ed8b0b44cbd4728f5f634d0c8eb2919b4b608c2167905b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f5be144aa7809948141c510d2160186c9a37bb506ffe07a1db0963818cc7288\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"ima
geID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bba8a64b307b1042705d4092e9f8db8a2b879a1768c1dfdc62e60dedfee0ee18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ff0d27d99b58551728ff39375adf147063226adca7cc7f6bf2bf93d1f27bff6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.649906 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.674230 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9d3c3af802b64678f23604b66562be2f5765786a126e5b04e99bf1ee6aa293ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.684435 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.684511 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.684522 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.684542 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.684558 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.692926 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.709793 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bbjnj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c1dca0a-c782-43f9-9390-7dc9c5311b97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:11Z\\\",\\\"message\\\":\\\"2026-01-28T15:17:26+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3\\\\n2026-01-28T15:17:26+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_118ca8e1-527d-4610-874a-c8a8d54fe8d3 to /host/opt/cni/bin/\\\\n2026-01-28T15:17:26Z [verbose] multus-daemon started\\\\n2026-01-28T15:17:26Z [verbose] Readiness Indicator file check\\\\n2026-01-28T15:18:11Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:18:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sk8s8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bbjnj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.729740 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-j879q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0238833-fbe2-4738-8079-14957d0506f7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15293c781cd4853fc1a4cfc04830fcd995e2ff71cbc8ac9fe4d0d2bf75ab69ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7p42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-j879q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.749850 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7bcfd52f-63d1-4c3a-acf8-dec05d62f3ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58b7feda6d4c7a51596cdce80179f42431b43282154b292952dfb58ab003c7cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8e86f596f023d165f035e1065b9d0c0708f4f632a961dbb32cdc73daaee0e5e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3f7f87ecb9f571e1c886beee676fe1093a270db122167f38fe3fc80a510dcd26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba9e40d88fbd308f6ef62cf1694dd15239f732d2a0776467cc6681e2b243bc0b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e51873f18edadc92e4aaa9e1eeca45f88d0cfbd2d72d00e089e25dc8a5fe3b1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fc500af7b0b5d39538ca27815c27ab4af4f4fdf152e06469b121e2a8d362be3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://156cdb290556b527cf65882bac0da9560e8799f9b4bdade37d5a780db40ff1ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w8lnq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b8kbq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.764186 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42bda473-f030-45fd-99a9-bbe18a224ae3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b87f64a887077378396fb8321f60369dd53ed980af5194b4468837fcfcf900c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb8cf60b55c335d519531579e3b0a8ca0352806298b6e8a6c3928e65e3bf8c2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlmtc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:36Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-dsmtj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 
15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.783656 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"de661d79-41b6-48ee-9f92-2e3498c43510\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26746a5a908dac2540fb6f6d432fcf291ee57642ac0ae6030bd490fc4ef7250\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b59648578a87ca1c464d0ff3a3107719dd14042687996b797df6833209009d53\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://13465bfb36e42d7f23ffecd5667a2ef338015c096f83f8eec7d8a5ceec2b1ccf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.788056 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.788089 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.788099 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.788135 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.788146 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.798851 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66d44fdc689af769f7b92e5556c2d5ccf745ee53436e908ca772fae7176dea27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f4f6a802adc172387d9d45296c8f399c8bfe89207686050dcbeb3f57cc625f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.813750 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9732bd7c244182b51c0793a4b27506b4e1725596373d3cd3ffee5190d8e0f503\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.827449 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f22b9702-cd33-405b-9cea-babf675908f5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10674e204482757e733645f03d076f896eedbd57f887d657255e0a2037c0d28c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jsz26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-r75mw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.850444 4959 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bad991a-9aad-4e7b-abdd-7d23124f60a8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:24Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T15:18:21Z\\\",\\\"message\\\":\\\"t-network-operator/iptables-alerter-4ln5h after 0 failed attempt(s)\\\\nI0128 15:18:21.508499 7063 default_network_controller.go:776] Recording success event on pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI0128 15:18:21.508329 7063 lb_config.go:1031] Cluster endpoints for openshift-machine-api/machine-api-controllers for network=default are: map[]\\\\nF0128 15:18:21.508507 7063 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:21Z is after 2025-08-24T17:21:41Z]\\\\nI0128 15:18:21.508502 7063 services_controller.go:451] Built service openshift-kube-storage-version-migrator-operator/metrics cluster-wide LB for netwo\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:18:20Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5t2sz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mvzjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.861232 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-xg4vp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d40d4ce-1825-487f-a8d8-7c1c68811757\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3df806cdbb4711c25148145df67e809976460e2978cda792a722af48f111e29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6m6k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-xg4vp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.874708 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"db03bd23-3b09-4f78-a35a-d219c7a948e4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T15:17:14Z\\\",\\\"message\\\":\\\"W0128 15:17:03.701791 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0128 15:17:03.702061 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769613423 cert, and key in /tmp/serving-cert-792779462/serving-signer.crt, /tmp/serving-cert-792779462/serving-signer.key\\\\nI0128 15:17:03.917966 1 observer_polling.go:159] Starting file observer\\\\nW0128 15:17:03.922397 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0128 15:17:03.922660 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 15:17:03.923989 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-792779462/tls.crt::/tmp/serving-cert-792779462/tls.key\\\\\\\"\\\\nF0128 15:17:14.290980 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.887727 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"299675b3-cb57-48e0-8891-3702849cc44c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5fe0931e2420366bdc822c387140ae7691ebc8426628a7ad8aa71f13336f1621\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad4ced597eb4184fa6010a7cbba1d8829467878dd3e0b16ba9501515948f9562\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://773c3c96e64f20a59c4c204e67c9cf3dc3fd7c31377a74e15be676d55a35d358\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T15:17:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://05c08389d37dc61b8b3b12cea2a092841cf63deac27c7265eef7c72b9909af0a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T15:17:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T15:17:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.890574 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.890620 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.890633 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.890646 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.890657 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.907748 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.927465 4959 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"943bb4d7-0907-4b19-b9e0-580af6061632\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T15:17:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tjqbn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T15:17:38Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4d9tj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:40Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.992896 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.992965 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.992977 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.993000 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:40 crc kubenswrapper[4959]: I0128 15:18:40.993016 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:40Z","lastTransitionTime":"2026-01-28T15:18:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.094710 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.094752 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.094764 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.094780 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.094791 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.197530 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.197582 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.197594 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.197621 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.197636 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.300030 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.300069 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.300078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.300092 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.300133 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.403009 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.403141 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.403183 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.403225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.403250 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.507249 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.507327 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.507365 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.507395 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.507414 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.519336 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 04:08:08.528528972 +0000 UTC Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.587254 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.587352 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:41 crc kubenswrapper[4959]: E0128 15:18:41.587525 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:41 crc kubenswrapper[4959]: E0128 15:18:41.587371 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.610265 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.610321 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.610333 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.610358 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.610385 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.714935 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.715005 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.715023 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.715050 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.715072 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.818774 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.818833 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.818845 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.818873 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.818889 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.922229 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.922315 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.922344 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.922450 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:41 crc kubenswrapper[4959]: I0128 15:18:41.922481 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:41Z","lastTransitionTime":"2026-01-28T15:18:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.026496 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.026561 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.026579 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.026605 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.026625 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:42Z","lastTransitionTime":"2026-01-28T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.133699 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:42 crc kubenswrapper[4959]: E0128 15:18:42.134033 4959 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:18:42 crc kubenswrapper[4959]: E0128 15:18:42.134329 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs podName:943bb4d7-0907-4b19-b9e0-580af6061632 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:46.134216041 +0000 UTC m=+169.580122464 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs") pod "network-metrics-daemon-4d9tj" (UID: "943bb4d7-0907-4b19-b9e0-580af6061632") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.136501 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.136543 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.136554 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.136572 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.136584 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:42Z","lastTransitionTime":"2026-01-28T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.239493 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.239549 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.239560 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.239584 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.239597 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:42Z","lastTransitionTime":"2026-01-28T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.346476 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.346729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.346757 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.346789 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.346808 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:42Z","lastTransitionTime":"2026-01-28T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.449990 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.450047 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.450060 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.450081 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.450092 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:42Z","lastTransitionTime":"2026-01-28T15:18:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
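The nestedpendingoperations record above shows the volume manager refusing to retry the failed MountVolume.SetUp for 1m4s. That interval is consistent with a capped exponential backoff (64s is a power-of-two multiple of a sub-second base). The sketch below shows the general shape of such a backoff; the base and cap are illustrative values, not constants read out of kubelet source.

package main

import (
	"fmt"
	"time"
)

// nextRetryDelay doubles the previous delay up to a limit, the usual
// shape for per-operation retry backoff.
func nextRetryDelay(prev, base, limit time.Duration) time.Duration {
	if prev <= 0 {
		return base
	}
	next := prev * 2
	if next > limit {
		return limit
	}
	return next
}

func main() {
	var d time.Duration
	for i := 0; i < 10; i++ {
		d = nextRetryDelay(d, 500*time.Millisecond, 2*time.Minute)
		fmt.Printf("attempt %d: wait %s before retrying MountVolume.SetUp\n", i+1, d)
	}
	// Seven doublings of a 500ms base give 64s, matching the
	// "durationBeforeRetry 1m4s" seen in the log.
}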
Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.520458 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 18:59:48.303356859 +0000 UTC
Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.586834 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:42 crc kubenswrapper[4959]: I0128 15:18:42.586891 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:42 crc kubenswrapper[4959]: E0128 15:18:42.587092 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:42 crc kubenswrapper[4959]: E0128 15:18:42.587502 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
Jan 28 15:18:43 crc kubenswrapper[4959]: I0128 15:18:43.521597 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 13:34:22.07125238 +0000 UTC
Jan 28 15:18:43 crc kubenswrapper[4959]: I0128 15:18:43.586068 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:18:43 crc kubenswrapper[4959]: I0128 15:18:43.586206 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:43 crc kubenswrapper[4959]: E0128 15:18:43.586401 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 15:18:43 crc kubenswrapper[4959]: E0128 15:18:43.586214 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.522783 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 03:52:49.06759379 +0000 UTC
Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.586479 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.586610 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:44 crc kubenswrapper[4959]: E0128 15:18:44.586620 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:44 crc kubenswrapper[4959]: E0128 15:18:44.586870 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
Has your network provider started?"} Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.824022 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.824071 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.824082 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.824099 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.824128 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:44Z","lastTransitionTime":"2026-01-28T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.926388 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.926445 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.926458 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.926477 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:44 crc kubenswrapper[4959]: I0128 15:18:44.926490 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:44Z","lastTransitionTime":"2026-01-28T15:18:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.029426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.029488 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.029502 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.029521 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.029535 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.131090 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.131158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.131170 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.131187 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.131201 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.234264 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.234311 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.234322 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.234338 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.234348 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.336497 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.336535 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.336545 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.336560 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.336569 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.438977 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.439019 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.439029 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.439042 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.439052 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.523517 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 22:04:36.777336706 +0000 UTC Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.541367 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.541422 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.541435 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.541455 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.541469 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.586886 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:45 crc kubenswrapper[4959]: E0128 15:18:45.587019 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.586892 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:45 crc kubenswrapper[4959]: E0128 15:18:45.587175 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.643822 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.643859 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.643869 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.643880 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.643890 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.745913 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.745945 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.745954 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.745967 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.745975 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.848791 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.848860 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.848882 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.848912 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.848934 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.951845 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.952119 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.952227 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.952306 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:45 crc kubenswrapper[4959]: I0128 15:18:45.952384 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:45Z","lastTransitionTime":"2026-01-28T15:18:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.054609 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.054660 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.054672 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.054689 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.054698 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.157824 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.157862 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.157871 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.157888 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.157898 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.266532 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.267139 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.267151 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.267173 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.267187 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.370315 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.370371 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.370387 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.370408 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.370423 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.472937 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.472974 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.472985 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.473001 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.473011 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.524604 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 20:25:58.167203641 +0000 UTC Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.576225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.576279 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.576293 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.576313 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.576328 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.587528 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.587631 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:46 crc kubenswrapper[4959]: E0128 15:18:46.587761 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:46 crc kubenswrapper[4959]: E0128 15:18:46.587910 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.679088 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.679190 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.679202 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.679227 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.679246 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.783000 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.783076 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.783093 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.783155 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.783188 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.887306 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.887355 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.887383 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.887403 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.887415 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.990361 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.990414 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.990426 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.990445 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:46 crc kubenswrapper[4959]: I0128 15:18:46.990460 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:46Z","lastTransitionTime":"2026-01-28T15:18:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.094130 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.094200 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.094218 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.094238 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.094251 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.197784 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.197883 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.198002 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.198041 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.198071 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.302605 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.302684 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.302706 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.302736 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.302756 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.406748 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.406810 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.406824 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.406848 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.406861 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.510017 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.510082 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.510094 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.510137 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.510148 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.525100 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 01:34:34.721129398 +0000 UTC Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.586804 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:47 crc kubenswrapper[4959]: E0128 15:18:47.586998 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.586821 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:47 crc kubenswrapper[4959]: E0128 15:18:47.587333 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.613009 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.613070 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.613083 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.613123 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.613144 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.715861 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.715895 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.715904 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.715918 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.715928 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.818871 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.818927 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.818940 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.818961 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.818977 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.922457 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.922498 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.922508 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.922524 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:47 crc kubenswrapper[4959]: I0128 15:18:47.922538 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:47Z","lastTransitionTime":"2026-01-28T15:18:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.025138 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.025200 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.025208 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.025224 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.025236 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.128356 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.128423 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.128444 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.128469 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.128485 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.231158 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.231197 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.231207 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.231221 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.231231 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.333699 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.333744 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.333773 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.333791 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.333803 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.336151 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.336215 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.336226 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.336243 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.336252 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: E0128 15:18:48.360949 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:48Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.366186 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.366305 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.366347 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.366372 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.366390 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: E0128 15:18:48.384809 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:48Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.388984 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.389020 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.389028 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.389042 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.389051 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: E0128 15:18:48.402221 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:48Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.405618 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.405687 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.405703 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.405726 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.405739 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: E0128 15:18:48.417052 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:48Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.420748 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.420790 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.420799 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.420815 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.420826 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: E0128 15:18:48.432498 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T15:18:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a9652fc7-854b-4938-b708-3f704c68c5f5\\\",\\\"systemUUID\\\":\\\"dc1a7e00-f904-4756-a6c3-34f447e56131\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T15:18:48Z is after 2025-08-24T17:21:41Z" Jan 28 15:18:48 crc kubenswrapper[4959]: E0128 15:18:48.432651 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.435973 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.436009 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.436017 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.436032 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.436042 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.525783 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 00:38:28.515125949 +0000 UTC Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.539529 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.539628 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.539647 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.539744 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.539758 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.586744 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.586774 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:48 crc kubenswrapper[4959]: E0128 15:18:48.587008 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:48 crc kubenswrapper[4959]: E0128 15:18:48.587384 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.642454 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.642514 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.642524 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.642542 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.642552 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.745015 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.745055 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.745066 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.745190 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.745216 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.847356 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.847405 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.847414 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.847430 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.847439 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.949914 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.949948 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.949958 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.949972 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:48 crc kubenswrapper[4959]: I0128 15:18:48.949983 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:48Z","lastTransitionTime":"2026-01-28T15:18:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.053586 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.053626 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.053634 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.053649 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.053657 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.156338 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.156389 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.156421 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.156437 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.156447 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.259289 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.259342 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.259351 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.259367 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.260157 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.363149 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.363462 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.363547 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.363656 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.363735 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.466557 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.466595 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.466603 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.466618 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.466630 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.526089 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 02:34:28.049411352 +0000 UTC Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.569226 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.569277 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.569287 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.569302 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.569312 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.586631 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.586645 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:49 crc kubenswrapper[4959]: E0128 15:18:49.586780 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:49 crc kubenswrapper[4959]: E0128 15:18:49.586922 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.672407 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.672693 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.672793 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.672890 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.672960 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.775759 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.776073 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.776177 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.776286 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.776373 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.879425 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.879483 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.879498 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.879516 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.879529 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.981727 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.981778 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.981786 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.981801 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:49 crc kubenswrapper[4959]: I0128 15:18:49.981812 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:49Z","lastTransitionTime":"2026-01-28T15:18:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.083828 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.083872 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.083883 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.083900 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.083910 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.083910 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.186661 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.186720 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.186729 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.186745 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.186771 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.288755 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.288787 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.288796 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.288812 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.288820 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.391407 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.391481 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.391495 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.391516 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.391528 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.494040 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.494093 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.494135 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.494152 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.494162 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.527201 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 19:32:31.155099217 +0000 UTC
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.586874 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.586930 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:50 crc kubenswrapper[4959]: E0128 15:18:50.587027 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:50 crc kubenswrapper[4959]: E0128 15:18:50.587251 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.596208 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.596248 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.596258 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.596273 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.596283 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.623461 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=44.623440851 podStartE2EDuration="44.623440851s" podCreationTimestamp="2026-01-28 15:18:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.60920991 +0000 UTC m=+114.055116293" watchObservedRunningTime="2026-01-28 15:18:50.623440851 +0000 UTC m=+114.069347234"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.662181 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-bbjnj" podStartSLOduration=87.662154737 podStartE2EDuration="1m27.662154737s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.650638153 +0000 UTC m=+114.096544536" watchObservedRunningTime="2026-01-28 15:18:50.662154737 +0000 UTC m=+114.108061120"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.662593 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-j879q" podStartSLOduration=87.662585787 podStartE2EDuration="1m27.662585787s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.661956522 +0000 UTC m=+114.107862905" watchObservedRunningTime="2026-01-28 15:18:50.662585787 +0000 UTC m=+114.108492190"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.698454 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.698499 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.698521 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.698538 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.698549 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.701210 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-b8kbq" podStartSLOduration=87.70118882 podStartE2EDuration="1m27.70118882s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.687100863 +0000 UTC m=+114.133007266" watchObservedRunningTime="2026-01-28 15:18:50.70118882 +0000 UTC m=+114.147095223"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.727195 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=29.727175841 podStartE2EDuration="29.727175841s" podCreationTimestamp="2026-01-28 15:18:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.726191248 +0000 UTC m=+114.172097641" watchObservedRunningTime="2026-01-28 15:18:50.727175841 +0000 UTC m=+114.173082234"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.727400 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-dsmtj" podStartSLOduration=86.727395787 podStartE2EDuration="1m26.727395787s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.701924818 +0000 UTC m=+114.147831201" watchObservedRunningTime="2026-01-28 15:18:50.727395787 +0000 UTC m=+114.173302170"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.765046 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podStartSLOduration=87.765029876 podStartE2EDuration="1m27.765029876s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.764057252 +0000 UTC m=+114.209963655" watchObservedRunningTime="2026-01-28 15:18:50.765029876 +0000 UTC m=+114.210936259"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.797874 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-xg4vp" podStartSLOduration=87.797852776 podStartE2EDuration="1m27.797852776s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.797835296 +0000 UTC m=+114.243741679" watchObservedRunningTime="2026-01-28 15:18:50.797852776 +0000 UTC m=+114.243759159"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.800807 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.800859 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.800870 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.800887 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.800899 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.812672 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=90.812650291 podStartE2EDuration="1m30.812650291s" podCreationTimestamp="2026-01-28 15:17:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.811644147 +0000 UTC m=+114.257550550" watchObservedRunningTime="2026-01-28 15:18:50.812650291 +0000 UTC m=+114.258556684"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.863400 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=91.863381064 podStartE2EDuration="1m31.863381064s" podCreationTimestamp="2026-01-28 15:17:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.862517053 +0000 UTC m=+114.308423456" watchObservedRunningTime="2026-01-28 15:18:50.863381064 +0000 UTC m=+114.309287447"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.874336 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=59.874316443 podStartE2EDuration="59.874316443s" podCreationTimestamp="2026-01-28 15:17:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:18:50.873829872 +0000 UTC m=+114.319736275" watchObservedRunningTime="2026-01-28 15:18:50.874316443 +0000 UTC m=+114.320222836"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.903031 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.903062 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.903072 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
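
Each setters.go:603 line in this log serializes a single node condition. A self-contained Go sketch reproducing that payload shape (the struct below is a local stand-in for k8s.io/api/core/v1.NodeCondition, which is an assumption about the underlying type, and the message is shortened for readability):

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"
    )

    // NodeCondition mirrors the fields visible in the condition={...} payloads above.
    type NodeCondition struct {
    	Type               string    `json:"type"`
    	Status             string    `json:"status"`
    	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
    	LastTransitionTime time.Time `json:"lastTransitionTime"`
    	Reason             string    `json:"reason"`
    	Message            string    `json:"message"`
    }

    func main() {
    	now, _ := time.Parse(time.RFC3339, "2026-01-28T15:18:50Z")
    	cond := NodeCondition{
    		Type:               "Ready",
    		Status:             "False",
    		LastHeartbeatTime:  now,
    		LastTransitionTime: now,
    		Reason:             "KubeletNotReady",
    		Message:            "container runtime network not ready: NetworkReady=false ...",
    	}
    	out, _ := json.Marshal(cond)
    	fmt.Println(string(out)) // same field order and shape as the payloads above
    }
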
"Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:50 crc kubenswrapper[4959]: I0128 15:18:50.903095 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:50Z","lastTransitionTime":"2026-01-28T15:18:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.006162 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.006218 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.006231 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.006254 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.006268 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.107962 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.108009 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.108022 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.108040 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.108053 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.210592 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.210657 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.210670 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.210685 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.210697 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.312014 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.312050 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.312059 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.312073 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.312082 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.414218 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.414278 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.414297 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.414331 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.414352 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.517395 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.517431 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.517441 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.517456 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.517467 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.527589 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 21:57:32.868662641 +0000 UTC Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.586359 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.586450 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:51 crc kubenswrapper[4959]: E0128 15:18:51.586804 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:51 crc kubenswrapper[4959]: E0128 15:18:51.586993 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
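
Note that the certificate_manager.go:356 lines report the same expiration (2026-02-24 05:53:03 UTC) each time but a different rotation deadline on every pass: the deadline is recomputed with random jitter at each evaluation. A hedged Go sketch of that scheme (the 70-90% window is the upstream kubelet default as far as I know, not stated in this log, and notBefore below is an assumed placeholder since the log never prints the issuance time):

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // rotationDeadline picks a random point at 70-90% of the certificate's validity.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
    	validity := notAfter.Sub(notBefore)
    	jittered := time.Duration(float64(validity) * (0.7 + 0.2*rand.Float64()))
    	return notBefore.Add(jittered)
    }

    func main() {
    	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
    	notBefore := notAfter.Add(-365 * 24 * time.Hour) // assumed one-year validity, not in the log
    	for i := 0; i < 3; i++ {
    		fmt.Println(rotationDeadline(notBefore, notAfter)) // a different deadline each call, as in the log
    	}
    }

Under the assumed one-year validity the computed deadlines fall in the same late-2025/early-2026 range as the five deadlines logged here; that is only a consistency check, not a derivation of the actual certificate lifetime.
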
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.587177 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:18:51 crc kubenswrapper[4959]: E0128 15:18:51.587296 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-mvzjl_openshift-ovn-kubernetes(1bad991a-9aad-4e7b-abdd-7d23124f60a8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.619970 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.620014 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.620023 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.620039 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.620048 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.722331 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.722394 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.722409 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.722430 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.722443 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.722443 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.824969 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.825018 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.825029 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.825051 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.825063 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.928225 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.928281 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.928297 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.928322 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:51 crc kubenswrapper[4959]: I0128 15:18:51.928338 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:51Z","lastTransitionTime":"2026-01-28T15:18:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.031041 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.031081 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.031092 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.031123 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.031135 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.133812 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.133849 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.133860 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.133902 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.133925 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.240588 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.240642 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.240659 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.240677 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.240690 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.343370 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.343409 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.343419 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.343436 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.343446 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.445978 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.446033 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.446044 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.446062 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.446074 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.528387 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 05:51:13.927883164 +0000 UTC
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.548475 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.548537 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.548553 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.548578 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.548594 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.586984 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:18:52 crc kubenswrapper[4959]: E0128 15:18:52.587217 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.587307 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj"
Jan 28 15:18:52 crc kubenswrapper[4959]: E0128 15:18:52.587638 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.651484 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.651520 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.651530 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.651546 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.651556 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.753710 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.753758 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.753772 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.753794 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.753805 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.856061 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.856138 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.856151 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.856168 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.856180 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.958280 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.958323 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.958334 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.958353 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:52 crc kubenswrapper[4959]: I0128 15:18:52.958363 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:52Z","lastTransitionTime":"2026-01-28T15:18:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.060370 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.060422 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.060442 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.060462 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.060473 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.162996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.163034 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.163042 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.163058 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.163068 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.265400 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.265449 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.265459 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.265474 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.265484 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.367823 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.367874 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.367882 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.367901 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.367912 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.470601 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.470643 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.470652 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.470668 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.470677 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.529126 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 19:40:10.040469425 +0000 UTC
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.573043 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.573078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.573087 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.573142 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.573161 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.586405 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.586419 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:53 crc kubenswrapper[4959]: E0128 15:18:53.586716 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.676397 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.676433 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.676441 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.676456 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.676467 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.779785 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.779850 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.779865 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.779885 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.779901 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.883240 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.883300 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.883309 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.883325 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.883334 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.985600 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.985638 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.985648 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.985664 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:53 crc kubenswrapper[4959]: I0128 15:18:53.985674 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:53Z","lastTransitionTime":"2026-01-28T15:18:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.088004 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.088050 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.088061 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.088078 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.088090 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.190630 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.190695 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.190710 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.190728 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.190738 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.292628 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.292663 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.292674 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.292688 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.292697 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.395056 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.395156 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.395173 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.395193 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.395205 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.497355 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.497418 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.497430 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.497447 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.497457 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.529768 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 15:33:59.334794107 +0000 UTC Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.586660 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.586683 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:54 crc kubenswrapper[4959]: E0128 15:18:54.586908 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:54 crc kubenswrapper[4959]: E0128 15:18:54.586989 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.599637 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.599680 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.599692 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.599740 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.599754 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.701986 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.702046 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.702061 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.702082 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.702124 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.804296 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.804329 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.804337 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.804352 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.804361 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.906655 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.906702 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.906713 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.906739 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:54 crc kubenswrapper[4959]: I0128 15:18:54.906751 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:54Z","lastTransitionTime":"2026-01-28T15:18:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.009157 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.009213 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.009224 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.009242 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.009253 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.111841 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.111905 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.111917 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.111938 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.111951 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.215143 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.215185 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.215207 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.215226 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.215241 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.320758 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.320800 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.320811 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.320829 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.320840 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.422885 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.422932 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.422947 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.422965 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.422976 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.525394 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.525454 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.525466 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.525483 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.525496 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.530571 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 07:30:47.93907532 +0000 UTC Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.586320 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.586386 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:55 crc kubenswrapper[4959]: E0128 15:18:55.586590 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:55 crc kubenswrapper[4959]: E0128 15:18:55.587001 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.628170 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.628206 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.628214 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.628229 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.628238 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.730417 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.730453 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.730461 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.730478 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.730488 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.833167 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.833214 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.833227 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.833246 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.833257 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.935761 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.935812 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.935821 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.935835 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:55 crc kubenswrapper[4959]: I0128 15:18:55.935844 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:55Z","lastTransitionTime":"2026-01-28T15:18:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.038631 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.038678 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.038688 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.038706 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.038717 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.140976 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.141028 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.141037 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.141056 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.141071 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.243981 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.244025 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.244036 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.244052 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.244063 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.346624 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.346674 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.346686 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.346705 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.346718 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.449015 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.449056 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.449067 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.449088 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.449098 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.531519 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 16:38:48.679493472 +0000 UTC Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.551794 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.551840 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.551848 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.551865 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.551874 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.586745 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.586827 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:56 crc kubenswrapper[4959]: E0128 15:18:56.586869 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:56 crc kubenswrapper[4959]: E0128 15:18:56.587004 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.654179 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.654249 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.654260 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.654279 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.654291 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.756698 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.756746 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.756754 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.756771 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.756780 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.859216 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.859247 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.859258 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.859274 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.859285 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.962358 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.962394 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.962432 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.962446 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:56 crc kubenswrapper[4959]: I0128 15:18:56.962455 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:56Z","lastTransitionTime":"2026-01-28T15:18:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.064474 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.064538 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.064548 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.064563 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.064573 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.167215 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.167253 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.167264 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.167280 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.167294 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.269421 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.269464 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.269475 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.269496 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.269509 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.371593 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.371637 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.371653 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.371669 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.371680 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.473481 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.473518 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.473527 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.473542 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.473551 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.532394 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 20:07:10.323732496 +0000 UTC Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.576255 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.576304 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.576313 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.576327 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.576336 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.586775 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:57 crc kubenswrapper[4959]: E0128 15:18:57.586936 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.587099 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:57 crc kubenswrapper[4959]: E0128 15:18:57.587277 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.679740 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.679786 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.679796 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.679818 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.679829 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.781921 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.781964 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.781976 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.781994 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.782009 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.884429 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.884511 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.884522 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.884538 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.884573 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.987171 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.987453 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.987501 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.987542 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:57 crc kubenswrapper[4959]: I0128 15:18:57.987554 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:57Z","lastTransitionTime":"2026-01-28T15:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.089949 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.089996 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.090004 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.090022 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.090032 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:58Z","lastTransitionTime":"2026-01-28T15:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.147314 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/1.log" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.147720 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/0.log" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.147764 4959 generic.go:334] "Generic (PLEG): container finished" podID="1c1dca0a-c782-43f9-9390-7dc9c5311b97" containerID="62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d" exitCode=1 Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.147797 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bbjnj" event={"ID":"1c1dca0a-c782-43f9-9390-7dc9c5311b97","Type":"ContainerDied","Data":"62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.147833 4959 scope.go:117] "RemoveContainer" containerID="17fa1602855e3aabae26ffc0788b95df86b4eda75d67ed28c55ed6d6b1a4b826" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.148278 4959 scope.go:117] "RemoveContainer" containerID="62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d" Jan 28 15:18:58 crc kubenswrapper[4959]: E0128 15:18:58.148561 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-bbjnj_openshift-multus(1c1dca0a-c782-43f9-9390-7dc9c5311b97)\"" pod="openshift-multus/multus-bbjnj" podUID="1c1dca0a-c782-43f9-9390-7dc9c5311b97" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.192871 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.192923 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.192936 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.192952 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.192962 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:58Z","lastTransitionTime":"2026-01-28T15:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.295649 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.295694 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.295703 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.295719 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.295731 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:58Z","lastTransitionTime":"2026-01-28T15:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.398195 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.398245 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.398256 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.398274 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.398286 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:58Z","lastTransitionTime":"2026-01-28T15:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.500861 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.500919 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.500935 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.500958 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.500977 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:58Z","lastTransitionTime":"2026-01-28T15:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.533267 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 23:11:59.61176987 +0000 UTC Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.586851 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.586928 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:18:58 crc kubenswrapper[4959]: E0128 15:18:58.586989 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:18:58 crc kubenswrapper[4959]: E0128 15:18:58.587145 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.602947 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.602990 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.603003 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.603023 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.603036 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:58Z","lastTransitionTime":"2026-01-28T15:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.705713 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.705759 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.705772 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.705791 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.705804 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:58Z","lastTransitionTime":"2026-01-28T15:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.746650 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.746696 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.746705 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.746721 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.746732 4959 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T15:18:58Z","lastTransitionTime":"2026-01-28T15:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.785747 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt"] Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.786098 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.788209 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.788282 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.788304 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.788402 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.804851 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a364c866-531d-400b-871b-92842bb2544f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.804896 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/a364c866-531d-400b-871b-92842bb2544f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.804914 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a364c866-531d-400b-871b-92842bb2544f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.804939 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/a364c866-531d-400b-871b-92842bb2544f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.804958 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a364c866-531d-400b-871b-92842bb2544f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.905770 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a364c866-531d-400b-871b-92842bb2544f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc 
kubenswrapper[4959]: I0128 15:18:58.905827 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/a364c866-531d-400b-871b-92842bb2544f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.905852 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a364c866-531d-400b-871b-92842bb2544f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.905899 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/a364c866-531d-400b-871b-92842bb2544f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.905922 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a364c866-531d-400b-871b-92842bb2544f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.906289 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/a364c866-531d-400b-871b-92842bb2544f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.906351 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/a364c866-531d-400b-871b-92842bb2544f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.907132 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a364c866-531d-400b-871b-92842bb2544f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.913951 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a364c866-531d-400b-871b-92842bb2544f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:58 crc kubenswrapper[4959]: I0128 15:18:58.922768 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a364c866-531d-400b-871b-92842bb2544f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kpclt\" (UID: \"a364c866-531d-400b-871b-92842bb2544f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:59 crc kubenswrapper[4959]: I0128 15:18:59.098185 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" Jan 28 15:18:59 crc kubenswrapper[4959]: I0128 15:18:59.152024 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" event={"ID":"a364c866-531d-400b-871b-92842bb2544f","Type":"ContainerStarted","Data":"dfbe6b5ffe4bc425cc29ae541e6117dd62da904da14c4154152d98c3fb50ac02"} Jan 28 15:18:59 crc kubenswrapper[4959]: I0128 15:18:59.154208 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/1.log" Jan 28 15:18:59 crc kubenswrapper[4959]: I0128 15:18:59.534058 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 23:03:29.104665402 +0000 UTC Jan 28 15:18:59 crc kubenswrapper[4959]: I0128 15:18:59.534150 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 28 15:18:59 crc kubenswrapper[4959]: I0128 15:18:59.543748 4959 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 28 15:18:59 crc kubenswrapper[4959]: I0128 15:18:59.586470 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:18:59 crc kubenswrapper[4959]: I0128 15:18:59.586700 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:18:59 crc kubenswrapper[4959]: E0128 15:18:59.586805 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:18:59 crc kubenswrapper[4959]: E0128 15:18:59.586889 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:00 crc kubenswrapper[4959]: I0128 15:19:00.158183 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" event={"ID":"a364c866-531d-400b-871b-92842bb2544f","Type":"ContainerStarted","Data":"929af344206211845d93800f3b33c6645aa36b945243bbc5e6740faca4c58f76"} Jan 28 15:19:00 crc kubenswrapper[4959]: I0128 15:19:00.173041 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpclt" podStartSLOduration=97.173019195 podStartE2EDuration="1m37.173019195s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:00.172537173 +0000 UTC m=+123.618443566" watchObservedRunningTime="2026-01-28 15:19:00.173019195 +0000 UTC m=+123.618925578" Jan 28 15:19:00 crc kubenswrapper[4959]: E0128 15:19:00.451187 4959 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 28 15:19:00 crc kubenswrapper[4959]: I0128 15:19:00.586695 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:00 crc kubenswrapper[4959]: I0128 15:19:00.586727 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:00 crc kubenswrapper[4959]: E0128 15:19:00.587926 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:19:00 crc kubenswrapper[4959]: E0128 15:19:00.588325 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:00 crc kubenswrapper[4959]: E0128 15:19:00.674501 4959 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 15:19:01 crc kubenswrapper[4959]: I0128 15:19:01.586618 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:01 crc kubenswrapper[4959]: I0128 15:19:01.586634 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:01 crc kubenswrapper[4959]: E0128 15:19:01.586770 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:19:01 crc kubenswrapper[4959]: E0128 15:19:01.586933 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:02 crc kubenswrapper[4959]: I0128 15:19:02.586461 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:02 crc kubenswrapper[4959]: I0128 15:19:02.586573 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:02 crc kubenswrapper[4959]: E0128 15:19:02.586649 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:02 crc kubenswrapper[4959]: E0128 15:19:02.586725 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:19:02 crc kubenswrapper[4959]: I0128 15:19:02.587440 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:19:03 crc kubenswrapper[4959]: I0128 15:19:03.190647 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/3.log" Jan 28 15:19:03 crc kubenswrapper[4959]: I0128 15:19:03.193489 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerStarted","Data":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} Jan 28 15:19:03 crc kubenswrapper[4959]: I0128 15:19:03.194060 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:19:03 crc kubenswrapper[4959]: I0128 15:19:03.586493 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:03 crc kubenswrapper[4959]: I0128 15:19:03.586552 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:03 crc kubenswrapper[4959]: E0128 15:19:03.586641 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:03 crc kubenswrapper[4959]: E0128 15:19:03.586774 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:19:03 crc kubenswrapper[4959]: I0128 15:19:03.773715 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podStartSLOduration=99.773689918 podStartE2EDuration="1m39.773689918s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:03.222003339 +0000 UTC m=+126.667909742" watchObservedRunningTime="2026-01-28 15:19:03.773689918 +0000 UTC m=+127.219596301" Jan 28 15:19:03 crc kubenswrapper[4959]: I0128 15:19:03.774334 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4d9tj"] Jan 28 15:19:03 crc kubenswrapper[4959]: I0128 15:19:03.774479 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:03 crc kubenswrapper[4959]: E0128 15:19:03.774588 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:04 crc kubenswrapper[4959]: I0128 15:19:04.586351 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:04 crc kubenswrapper[4959]: E0128 15:19:04.586508 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:19:05 crc kubenswrapper[4959]: I0128 15:19:05.586586 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:05 crc kubenswrapper[4959]: I0128 15:19:05.586617 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:05 crc kubenswrapper[4959]: I0128 15:19:05.586586 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:05 crc kubenswrapper[4959]: E0128 15:19:05.586748 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:05 crc kubenswrapper[4959]: E0128 15:19:05.586795 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:05 crc kubenswrapper[4959]: E0128 15:19:05.586878 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:19:05 crc kubenswrapper[4959]: E0128 15:19:05.675735 4959 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 15:19:06 crc kubenswrapper[4959]: I0128 15:19:06.586043 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:06 crc kubenswrapper[4959]: E0128 15:19:06.586232 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:19:07 crc kubenswrapper[4959]: I0128 15:19:07.586216 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:07 crc kubenswrapper[4959]: I0128 15:19:07.586289 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:07 crc kubenswrapper[4959]: E0128 15:19:07.586369 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:07 crc kubenswrapper[4959]: E0128 15:19:07.586420 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:19:07 crc kubenswrapper[4959]: I0128 15:19:07.586868 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:07 crc kubenswrapper[4959]: E0128 15:19:07.587014 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:08 crc kubenswrapper[4959]: I0128 15:19:08.586557 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:08 crc kubenswrapper[4959]: E0128 15:19:08.587360 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:19:09 crc kubenswrapper[4959]: I0128 15:19:09.587002 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:09 crc kubenswrapper[4959]: I0128 15:19:09.587013 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:09 crc kubenswrapper[4959]: E0128 15:19:09.587183 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:19:09 crc kubenswrapper[4959]: I0128 15:19:09.587013 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:09 crc kubenswrapper[4959]: E0128 15:19:09.587274 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:09 crc kubenswrapper[4959]: E0128 15:19:09.587393 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:10 crc kubenswrapper[4959]: I0128 15:19:10.587096 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:10 crc kubenswrapper[4959]: E0128 15:19:10.588682 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:19:10 crc kubenswrapper[4959]: E0128 15:19:10.676615 4959 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 28 15:19:11 crc kubenswrapper[4959]: I0128 15:19:11.586937 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:11 crc kubenswrapper[4959]: I0128 15:19:11.587007 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:11 crc kubenswrapper[4959]: I0128 15:19:11.587055 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:11 crc kubenswrapper[4959]: E0128 15:19:11.587094 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:11 crc kubenswrapper[4959]: E0128 15:19:11.587193 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:11 crc kubenswrapper[4959]: E0128 15:19:11.587262 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:19:12 crc kubenswrapper[4959]: I0128 15:19:12.586449 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:12 crc kubenswrapper[4959]: E0128 15:19:12.586606 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:19:12 crc kubenswrapper[4959]: I0128 15:19:12.587048 4959 scope.go:117] "RemoveContainer" containerID="62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d" Jan 28 15:19:13 crc kubenswrapper[4959]: I0128 15:19:13.228675 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/1.log" Jan 28 15:19:13 crc kubenswrapper[4959]: I0128 15:19:13.228727 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bbjnj" event={"ID":"1c1dca0a-c782-43f9-9390-7dc9c5311b97","Type":"ContainerStarted","Data":"2ce5ba0a6fc65f5c4deba9c42f6a6ee7c031bd8945252d3ceb3501661be8235b"} Jan 28 15:19:13 crc kubenswrapper[4959]: I0128 15:19:13.586316 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:13 crc kubenswrapper[4959]: I0128 15:19:13.586356 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:13 crc kubenswrapper[4959]: I0128 15:19:13.586356 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:13 crc kubenswrapper[4959]: E0128 15:19:13.586495 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:13 crc kubenswrapper[4959]: E0128 15:19:13.586741 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:19:13 crc kubenswrapper[4959]: E0128 15:19:13.586874 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:14 crc kubenswrapper[4959]: I0128 15:19:14.586248 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:14 crc kubenswrapper[4959]: E0128 15:19:14.586542 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 15:19:15 crc kubenswrapper[4959]: I0128 15:19:15.586499 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:15 crc kubenswrapper[4959]: I0128 15:19:15.586741 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:15 crc kubenswrapper[4959]: E0128 15:19:15.586784 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4d9tj" podUID="943bb4d7-0907-4b19-b9e0-580af6061632" Jan 28 15:19:15 crc kubenswrapper[4959]: I0128 15:19:15.586871 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:15 crc kubenswrapper[4959]: E0128 15:19:15.587183 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 15:19:15 crc kubenswrapper[4959]: E0128 15:19:15.587416 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 15:19:16 crc kubenswrapper[4959]: I0128 15:19:16.586236 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:16 crc kubenswrapper[4959]: I0128 15:19:16.588698 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 28 15:19:16 crc kubenswrapper[4959]: I0128 15:19:16.588694 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 28 15:19:17 crc kubenswrapper[4959]: I0128 15:19:17.586958 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:17 crc kubenswrapper[4959]: I0128 15:19:17.587038 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:17 crc kubenswrapper[4959]: I0128 15:19:17.587131 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:17 crc kubenswrapper[4959]: I0128 15:19:17.591149 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 28 15:19:17 crc kubenswrapper[4959]: I0128 15:19:17.591322 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 28 15:19:17 crc kubenswrapper[4959]: I0128 15:19:17.591323 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 28 15:19:17 crc kubenswrapper[4959]: I0128 15:19:17.592967 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.422292 4959 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.477047 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ld2wn"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.478147 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.481537 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-gqkxc"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.484236 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.485205 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.494914 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.495540 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.496119 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.496800 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.508245 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.508646 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-l7lfr"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.509057 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.509686 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.509977 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.511858 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-n5pdh"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.512944 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-nk6xq"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.513541 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.513888 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.514106 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.514587 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.514840 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.514984 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.515330 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.514657 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.515839 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.514723 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.514757 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.514802 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.516451 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.516722 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hr5x"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.517299 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.517854 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.517974 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-etcd-serving-ca\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518089 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-serving-cert\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518223 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-policies\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518310 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518396 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/16e045df-c42a-4f79-9bbc-7504250fdb81-available-featuregates\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518481 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518552 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-dir\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518618 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518695 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518772 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgvdr\" (UniqueName: \"kubernetes.io/projected/16e045df-c42a-4f79-9bbc-7504250fdb81-kube-api-access-tgvdr\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518844 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-config\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.518933 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7w76\" (UniqueName: \"kubernetes.io/projected/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-kube-api-access-h7w76\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519034 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519141 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16e045df-c42a-4f79-9bbc-7504250fdb81-serving-cert\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519228 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-node-pullsecrets\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " 
pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519308 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519384 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-image-import-ca\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519470 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-encryption-config\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519542 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-audit-dir\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519622 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519696 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-etcd-client\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519766 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-audit\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519841 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ncb9\" (UniqueName: \"kubernetes.io/projected/29b47bb2-f090-43a4-b2ea-7bb83b683efb-kube-api-access-8ncb9\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519912 4959 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.519989 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghqx2\" (UniqueName: \"kubernetes.io/projected/e84ed88e-eaf6-433c-b930-93f13ed09fcf-kube-api-access-ghqx2\") pod \"downloads-7954f5f757-gqkxc\" (UID: \"e84ed88e-eaf6-433c-b930-93f13ed09fcf\") " pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.520078 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.526897 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.527065 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.520316 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.522607 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5vzvg"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.528232 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-dmkr9"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.528948 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.522528 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.522792 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.522829 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 
15:19:19.529694 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.530073 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.529585 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.522871 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536518 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-fphck"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.538930 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.522897 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.522927 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.522939 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.523015 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.523073 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.523336 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.523392 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.523455 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.523507 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.523559 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.524393 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.535692 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.535765 4959 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.535993 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536042 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536088 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536186 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536226 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536254 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536273 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536318 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536388 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536411 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536448 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536451 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536476 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536584 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536585 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536692 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536851 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.536888 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.537521 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 28 15:19:19 crc 
kubenswrapper[4959]: I0128 15:19:19.538015 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.538265 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.538580 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.540437 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.540726 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-b7ncr"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.541099 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-ddn7x"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.541518 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.541949 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.541121 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.541173 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.542241 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.541785 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.542453 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.543160 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.544081 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.544314 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.547885 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.548672 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.553795 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plqj9"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.572997 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.573800 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.574082 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.575567 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.575866 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.579518 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.579839 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.580170 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.580246 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.580426 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.580782 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.580906 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.581022 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.581069 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.581184 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.582305 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.583179 4959 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.583186 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.583247 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.584029 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.584574 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.584609 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.584572 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.584577 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.597335 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.599722 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k7vzj"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.600685 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.600686 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.600971 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.601230 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.601365 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.601721 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.602216 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ljp65"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.602510 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.602809 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.602867 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.602941 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.603741 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.603877 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.603910 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.604011 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.604144 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.604324 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.604554 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.605034 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.605470 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.605633 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.605651 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.609471 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.611081 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.615238 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.617382 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.618476 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zklvs"] Jan 28 15:19:19 
crc kubenswrapper[4959]: I0128 15:19:19.618979 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.622392 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.623095 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.624608 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.625266 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.625526 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.627100 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.627970 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.628032 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-etcd-serving-ca\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.628078 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-serving-cert\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.628619 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.629822 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630004 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-etcd-serving-ca\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630026 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 15:19:19 
crc kubenswrapper[4959]: I0128 15:19:19.630071 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630464 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.628106 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-policies\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630717 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630769 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/16e045df-c42a-4f79-9bbc-7504250fdb81-available-featuregates\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630816 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630866 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-dir\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630896 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630919 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630948 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgvdr\" (UniqueName: 
\"kubernetes.io/projected/16e045df-c42a-4f79-9bbc-7504250fdb81-kube-api-access-tgvdr\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630969 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-config\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.630988 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7w76\" (UniqueName: \"kubernetes.io/projected/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-kube-api-access-h7w76\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.631013 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.631043 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16e045df-c42a-4f79-9bbc-7504250fdb81-serving-cert\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.631072 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-node-pullsecrets\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.631130 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.631160 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-image-import-ca\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.631168 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-policies\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.632409 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.635813 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.643399 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/16e045df-c42a-4f79-9bbc-7504250fdb81-available-featuregates\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.648438 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.650829 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651415 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-encryption-config\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651502 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-audit-dir\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651572 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-etcd-client\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651608 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651641 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-audit\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 
15:19:19.651689 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ncb9\" (UniqueName: \"kubernetes.io/projected/29b47bb2-f090-43a4-b2ea-7bb83b683efb-kube-api-access-8ncb9\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651721 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651764 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghqx2\" (UniqueName: \"kubernetes.io/projected/e84ed88e-eaf6-433c-b930-93f13ed09fcf-kube-api-access-ghqx2\") pod \"downloads-7954f5f757-gqkxc\" (UID: \"e84ed88e-eaf6-433c-b930-93f13ed09fcf\") " pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651793 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651822 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.651876 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.652695 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.652880 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-audit-dir\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.655145 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-dir\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.656489 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-config\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.656872 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-node-pullsecrets\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.657475 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.658271 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-image-import-ca\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.658304 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.659280 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.659610 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-serving-cert\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.659724 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16e045df-c42a-4f79-9bbc-7504250fdb81-serving-cert\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 
15:19:19.660253 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-audit\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.662860 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.663964 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.664726 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.664996 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.665750 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-5xm4l"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.666302 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.666395 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.666896 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.667076 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.667310 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.667758 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.668590 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.668613 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-etcd-client\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.668869 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.669743 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.670517 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.671178 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.672224 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.672753 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.672899 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.673355 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.673727 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.674506 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.674599 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.674690 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-encryption-config\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.675299 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.676529 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.676873 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.676905 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-gqkxc"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.676917 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.677005 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.680771 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.684853 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.686649 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ld2wn"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.687826 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-977m5"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.689099 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.689524 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-ctvhn"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.690849 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.692200 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-ddn7x"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.692823 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hr5x"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.694027 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-fphck"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.695129 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-n5pdh"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.696278 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.697629 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k7vzj"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.698540 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5vzvg"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.700011 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.703386 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.705388 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.706695 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ljp65"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.707773 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.710016 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.711082 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-l7lfr"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.712230 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-dmkr9"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.714394 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.715994 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.716774 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-b7ncr"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.718301 4959 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.719534 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.720587 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.721476 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.722270 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.723400 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plqj9"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.724769 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.726659 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zklvs"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.728791 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.730889 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-9s7sv"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.731584 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.732671 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.734396 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.735883 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.737213 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.738186 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-886ph"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.740244 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.740518 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-nk6xq"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.742714 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-ctvhn"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.742733 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-886ph"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.740666 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-886ph" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.743035 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.744064 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-977m5"] Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.760702 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.781072 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.800298 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.827726 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.840863 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.861420 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.880967 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.920799 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.941234 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955364 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955404 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9njd\" (UniqueName: \"kubernetes.io/projected/3ce471aa-c701-4901-b0ed-c66d86cd0059-kube-api-access-s9njd\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955430 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgxjl\" (UniqueName: \"kubernetes.io/projected/b26918d7-cc7c-4925-8e2f-ba17e60177d5-kube-api-access-mgxjl\") pod \"cluster-samples-operator-665b6dd947-z8frl\" (UID: \"b26918d7-cc7c-4925-8e2f-ba17e60177d5\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" Jan 28 15:19:19 
crc kubenswrapper[4959]: I0128 15:19:19.955457 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955474 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2whj7\" (UniqueName: \"kubernetes.io/projected/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-kube-api-access-2whj7\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955492 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-client-ca\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955515 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b26918d7-cc7c-4925-8e2f-ba17e60177d5-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-z8frl\" (UID: \"b26918d7-cc7c-4925-8e2f-ba17e60177d5\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955531 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cl5bx\" (UniqueName: \"kubernetes.io/projected/6d9aabc2-ed95-420b-afad-7af29c415329-kube-api-access-cl5bx\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955553 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6d9aabc2-ed95-420b-afad-7af29c415329-auth-proxy-config\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955572 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/97307cd7-d817-4da5-b2bc-adbb5f1406c8-metrics-tls\") pod \"dns-operator-744455d44c-ld2wn\" (UID: \"97307cd7-d817-4da5-b2bc-adbb5f1406c8\") " pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955592 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c29792dd-faa1-4f0c-b405-e0de581ee26f-ca-trust-extracted\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" 
Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955609 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-serving-cert\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955634 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-audit-policies\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955650 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d9aabc2-ed95-420b-afad-7af29c415329-config\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955699 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vszr\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-kube-api-access-2vszr\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955723 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-serving-cert\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955740 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-config\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955755 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-bound-sa-token\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955771 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-tls\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955787 4959 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3ce471aa-c701-4901-b0ed-c66d86cd0059-audit-dir\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955807 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-trusted-ca\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955822 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6p74z\" (UniqueName: \"kubernetes.io/projected/97307cd7-d817-4da5-b2bc-adbb5f1406c8-kube-api-access-6p74z\") pod \"dns-operator-744455d44c-ld2wn\" (UID: \"97307cd7-d817-4da5-b2bc-adbb5f1406c8\") " pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955851 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6d9aabc2-ed95-420b-afad-7af29c415329-machine-approver-tls\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955866 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-certificates\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955883 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-etcd-client\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955898 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-encryption-config\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955921 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.955939 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" 
(UniqueName: \"kubernetes.io/secret/c29792dd-faa1-4f0c-b405-e0de581ee26f-installation-pull-secrets\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:19 crc kubenswrapper[4959]: E0128 15:19:19.956488 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.456473114 +0000 UTC m=+143.902379497 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.962150 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 28 15:19:19 crc kubenswrapper[4959]: I0128 15:19:19.981528 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.000936 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.022518 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.041454 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.057240 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.057597 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.557540839 +0000 UTC m=+144.003447272 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.057821 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-serving-cert\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.057914 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c2d3b29-3f92-4a16-9a53-0e98fb387802-serving-cert\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.057982 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-node-bootstrap-token\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058046 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lkzr\" (UniqueName: \"kubernetes.io/projected/6c2d3b29-3f92-4a16-9a53-0e98fb387802-kube-api-access-9lkzr\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058149 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfgns\" (UniqueName: \"kubernetes.io/projected/dfb911c8-efb4-4973-954c-808a8e87f0fe-kube-api-access-jfgns\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058197 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/994c73fc-b24b-4cef-8145-561cc1dca4c7-config\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058252 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-bound-sa-token\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 
15:19:20.058364 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-tls\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058503 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-metrics-tls\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058660 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3ce471aa-c701-4901-b0ed-c66d86cd0059-audit-dir\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058748 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vp5q\" (UniqueName: \"kubernetes.io/projected/6d14429e-3f90-4010-9b67-b0b6e81db122-kube-api-access-8vp5q\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058818 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-trusted-ca\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058832 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3ce471aa-c701-4901-b0ed-c66d86cd0059-audit-dir\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058914 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd4a69ec-a69e-45fa-8105-d50da9b41212-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.058982 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0aedd299-b681-4f9f-b92e-d2bf27be7d06-service-ca-bundle\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059038 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e8823a3-69b2-4e0d-9c22-61c698970b38-config\") pod 
\"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059096 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62ef9da5-17d7-4dab-9868-9e7c7694b799-metrics-tls\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059193 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7cef5bdc-f62b-4872-bc40-59765c1faa27-cert\") pod \"ingress-canary-977m5\" (UID: \"7cef5bdc-f62b-4872-bc40-59765c1faa27\") " pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059260 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-etcd-client\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059321 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-encryption-config\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059376 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/994c73fc-b24b-4cef-8145-561cc1dca4c7-serving-cert\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059443 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8b131611-c7ac-4409-9f6f-6f63309e4c55-profile-collector-cert\") pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059579 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a215a092-653c-4a83-9901-e5094b2c0f12-proxy-tls\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059610 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3c910a54-aff5-4519-9a44-d17cc6001208-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 
15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059677 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c29792dd-faa1-4f0c-b405-e0de581ee26f-installation-pull-secrets\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059708 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-config\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059733 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e8823a3-69b2-4e0d-9c22-61c698970b38-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059769 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/62ef9da5-17d7-4dab-9868-9e7c7694b799-config-volume\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059807 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2d4611de-0934-450c-a51e-67298e455900-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059831 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0e736f-1d0f-4c59-ab45-b057dda052aa-secret-volume\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059852 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-registration-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059877 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5013fa35-7674-41bd-83a4-d01ff22253e9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k7vzj\" (UID: \"5013fa35-7674-41bd-83a4-d01ff22253e9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059904 4959 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.059986 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9njd\" (UniqueName: \"kubernetes.io/projected/3ce471aa-c701-4901-b0ed-c66d86cd0059-kube-api-access-s9njd\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060064 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/994c73fc-b24b-4cef-8145-561cc1dca4c7-trusted-ca\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060085 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-oauth-serving-cert\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060313 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060359 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-config\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060380 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83cc5140-ea3b-4939-a8c8-46566dea8c2d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060410 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-ca\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060429 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdgtk\" (UniqueName: 
\"kubernetes.io/projected/5013fa35-7674-41bd-83a4-d01ff22253e9-kube-api-access-fdgtk\") pod \"multus-admission-controller-857f4d67dd-k7vzj\" (UID: \"5013fa35-7674-41bd-83a4-d01ff22253e9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060493 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-config\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060563 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d2d96b34-2e44-4d18-a591-2c286c762bf9-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-884qg\" (UID: \"d2d96b34-2e44-4d18-a591-2c286c762bf9\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060613 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-client\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060632 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjmsw\" (UniqueName: \"kubernetes.io/projected/0aedd299-b681-4f9f-b92e-d2bf27be7d06-kube-api-access-qjmsw\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060669 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6zgs\" (UniqueName: \"kubernetes.io/projected/62ef9da5-17d7-4dab-9868-9e7c7694b799-kube-api-access-t6zgs\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060696 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-stats-auth\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060762 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cl5bx\" (UniqueName: \"kubernetes.io/projected/6d9aabc2-ed95-420b-afad-7af29c415329-kube-api-access-cl5bx\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060788 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060818 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-webhook-cert\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060840 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-default-certificate\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060865 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/97307cd7-d817-4da5-b2bc-adbb5f1406c8-metrics-tls\") pod \"dns-operator-744455d44c-ld2wn\" (UID: \"97307cd7-d817-4da5-b2bc-adbb5f1406c8\") " pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060874 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060882 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-config\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060964 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qg4f\" (UniqueName: \"kubernetes.io/projected/d2d96b34-2e44-4d18-a591-2c286c762bf9-kube-api-access-5qg4f\") pod \"control-plane-machine-set-operator-78cbb6b69f-884qg\" (UID: \"d2d96b34-2e44-4d18-a591-2c286c762bf9\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.060976 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061040 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/42901316-efd2-4e0f-9505-f8b0e5cc676c-kube-api-access\") pod 
\"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061084 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-config\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061151 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-trusted-ca\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061391 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0673f696-f58d-4980-8858-3a9aa97eb9dc-config\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: \"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061524 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c4683ca-d036-458c-87ab-aaf398a5dbf5-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-mszk6\" (UID: \"3c4683ca-d036-458c-87ab-aaf398a5dbf5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061688 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc5f4\" (UniqueName: \"kubernetes.io/projected/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-kube-api-access-hc5f4\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061764 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d5aae7df-27bd-4553-8367-c4b6c65906ec-signing-cabundle\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061866 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.061984 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062034 4959 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r4c5\" (UniqueName: \"kubernetes.io/projected/3c4683ca-d036-458c-87ab-aaf398a5dbf5-kube-api-access-8r4c5\") pod \"package-server-manager-789f6589d5-mszk6\" (UID: \"3c4683ca-d036-458c-87ab-aaf398a5dbf5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062090 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-apiservice-cert\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062206 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vszr\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-kube-api-access-2vszr\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062272 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062325 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/dfb911c8-efb4-4973-954c-808a8e87f0fe-images\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062396 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dfb911c8-efb4-4973-954c-808a8e87f0fe-proxy-tls\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062436 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-metrics-certs\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062495 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxxvw\" (UniqueName: \"kubernetes.io/projected/cd4a69ec-a69e-45fa-8105-d50da9b41212-kube-api-access-nxxvw\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062537 4959 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-service-ca\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062568 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsbq2\" (UniqueName: \"kubernetes.io/projected/0518e394-58c0-41d8-9f03-db75743ff4a8-kube-api-access-xsbq2\") pod \"migrator-59844c95c7-nkcwd\" (UID: \"0518e394-58c0-41d8-9f03-db75743ff4a8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062650 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-config\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062710 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42901316-efd2-4e0f-9505-f8b0e5cc676c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062758 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-service-ca\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062797 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26g5c\" (UniqueName: \"kubernetes.io/projected/3c910a54-aff5-4519-9a44-d17cc6001208-kube-api-access-26g5c\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062852 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-tmpfs\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.062974 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-tls\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063023 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/06bff249-845d-4278-a2e5-a2a7c54c2f41-serving-cert\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063230 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbrrc\" (UniqueName: \"kubernetes.io/projected/994c73fc-b24b-4cef-8145-561cc1dca4c7-kube-api-access-hbrrc\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063280 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-encryption-config\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063307 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6p74z\" (UniqueName: \"kubernetes.io/projected/97307cd7-d817-4da5-b2bc-adbb5f1406c8-kube-api-access-6p74z\") pod \"dns-operator-744455d44c-ld2wn\" (UID: \"97307cd7-d817-4da5-b2bc-adbb5f1406c8\") " pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063348 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkftf\" (UniqueName: \"kubernetes.io/projected/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-kube-api-access-lkftf\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063409 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-certificates\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063460 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6d9aabc2-ed95-420b-afad-7af29c415329-machine-approver-tls\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063503 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5tf7\" (UniqueName: \"kubernetes.io/projected/97400b4d-3097-4875-a5ec-08f867212a0e-kube-api-access-b5tf7\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063543 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8b131611-c7ac-4409-9f6f-6f63309e4c55-srv-cert\") 
pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063617 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-serving-cert\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063683 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55ppf\" (UniqueName: \"kubernetes.io/projected/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-kube-api-access-55ppf\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063712 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-config\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.063782 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0673f696-f58d-4980-8858-3a9aa97eb9dc-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: \"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.064922 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-etcd-client\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.064946 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ce471aa-c701-4901-b0ed-c66d86cd0059-serving-cert\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.064943 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-certificates\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.065008 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-images\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 
15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.065071 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97400b4d-3097-4875-a5ec-08f867212a0e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.065172 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw8xl\" (UniqueName: \"kubernetes.io/projected/d5aae7df-27bd-4553-8367-c4b6c65906ec-kube-api-access-jw8xl\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.065357 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066043 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vxxr\" (UniqueName: \"kubernetes.io/projected/83cc5140-ea3b-4939-a8c8-46566dea8c2d-kube-api-access-8vxxr\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.065848 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/97307cd7-d817-4da5-b2bc-adbb5f1406c8-metrics-tls\") pod \"dns-operator-744455d44c-ld2wn\" (UID: \"97307cd7-d817-4da5-b2bc-adbb5f1406c8\") " pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066135 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgxjl\" (UniqueName: \"kubernetes.io/projected/b26918d7-cc7c-4925-8e2f-ba17e60177d5-kube-api-access-mgxjl\") pod \"cluster-samples-operator-665b6dd947-z8frl\" (UID: \"b26918d7-cc7c-4925-8e2f-ba17e60177d5\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl"
Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.066186 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.566158832 +0000 UTC m=+144.012065215 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066270 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-service-ca-bundle\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066309 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zp9s\" (UniqueName: \"kubernetes.io/projected/0673f696-f58d-4980-8858-3a9aa97eb9dc-kube-api-access-9zp9s\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: \"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066329 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-serving-cert\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066353 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t69w\" (UniqueName: \"kubernetes.io/projected/a215a092-653c-4a83-9901-e5094b2c0f12-kube-api-access-2t69w\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066373 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a215a092-653c-4a83-9901-e5094b2c0f12-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066428 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2whj7\" (UniqueName: \"kubernetes.io/projected/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-kube-api-access-2whj7\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw"
Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066516 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbllz\" (UniqueName: \"kubernetes.io/projected/04f6a584-73bb-4e7d-9d8c-677b58c44944-kube-api-access-zbllz\") pod \"machine-config-server-9s7sv\" (UID:
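
The E0128 entry above is the first sign of trouble in this window: MountVolume.MountDevice for the image registry's PVC fails because the hostpath-provisioner CSI driver has not registered with the kubelet yet, and nestedpendingoperations.go places the operation under exponential backoff ("No retries permitted until ... durationBeforeRetry 500ms"; the m=+144.012065215 offset is monotonic time since the kubelet process started). A minimal sketch of that doubling-with-cap delay policy follows; the 500ms initial delay comes straight from the log line, while the 2x factor and the 2m2s cap are assumptions for illustration, not values read from this log.

```go
// Minimal sketch of the doubling-with-cap retry delay behind
// "No retries permitted until ... (durationBeforeRetry 500ms)".
// The 500ms initial delay matches the log; the 2x factor and 2m2s cap
// are assumptions for illustration.
package main

import (
	"fmt"
	"time"
)

type backoff struct {
	delay     time.Duration // delay to apply on the next failure
	notBefore time.Time     // earliest time the operation may be retried
}

// fail records a failure: it sets the earliest next attempt, then doubles
// the delay for the following failure, up to the cap.
func (b *backoff) fail(now time.Time) {
	if b.delay == 0 {
		b.delay = 500 * time.Millisecond
	}
	b.notBefore = now.Add(b.delay)
	if next := b.delay * 2; next <= 122*time.Second { // assumed cap: 2m2s
		b.delay = next
	}
}

// allowed reports whether a retry is permitted at the given time.
func (b *backoff) allowed(now time.Time) bool { return !now.Before(b.notBefore) }

func main() {
	var b backoff
	now := time.Now()
	for i := 0; i < 4; i++ {
		b.fail(now)
		fmt.Printf("failure %d: no retries permitted until %s (durationBeforeRetry %s)\n",
			i+1, b.notBefore.Format(time.RFC3339Nano), b.notBefore.Sub(now))
		now = b.notBefore // pretend the retry happens at the earliest allowed time and fails again
	}
}
```

Because the delay doubles per failure, a volume whose driver registers quickly gets retried almost immediately (here, 500ms later), while a persistently failing mount backs off toward the cap instead of hot-looping.
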
\"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066560 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-plugins-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066594 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-client-ca\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066613 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r975\" (UniqueName: \"kubernetes.io/projected/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-kube-api-access-2r975\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066619 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c29792dd-faa1-4f0c-b405-e0de581ee26f-installation-pull-secrets\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066633 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-oauth-config\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066725 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97400b4d-3097-4875-a5ec-08f867212a0e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066766 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ghv5\" (UniqueName: \"kubernetes.io/projected/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-kube-api-access-6ghv5\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066811 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dfb911c8-efb4-4973-954c-808a8e87f0fe-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066847 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-mountpoint-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066883 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6d9aabc2-ed95-420b-afad-7af29c415329-machine-approver-tls\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066929 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b26918d7-cc7c-4925-8e2f-ba17e60177d5-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-z8frl\" (UID: \"b26918d7-cc7c-4925-8e2f-ba17e60177d5\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.066977 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e8823a3-69b2-4e0d-9c22-61c698970b38-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067014 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-trusted-ca-bundle\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067148 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6d9aabc2-ed95-420b-afad-7af29c415329-auth-proxy-config\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067194 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067232 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-client-ca\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067452 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/cd4a69ec-a69e-45fa-8105-d50da9b41212-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067484 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83cc5140-ea3b-4939-a8c8-46566dea8c2d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067538 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d5aae7df-27bd-4553-8367-c4b6c65906ec-signing-key\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067643 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-socket-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067654 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-client-ca\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067679 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glzgm\" (UniqueName: \"kubernetes.io/projected/8b131611-c7ac-4409-9f6f-6f63309e4c55-kube-api-access-glzgm\") pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067757 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-certs\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067889 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rrvg\" (UniqueName: \"kubernetes.io/projected/d2b82dee-9207-4cad-ad4f-364e0c680d4c-kube-api-access-7rrvg\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 
28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.067952 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42901316-efd2-4e0f-9505-f8b0e5cc676c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.068046 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c29792dd-faa1-4f0c-b405-e0de581ee26f-ca-trust-extracted\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.068073 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6d9aabc2-ed95-420b-afad-7af29c415329-auth-proxy-config\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.068174 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-serving-cert\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.068403 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znblt\" (UniqueName: \"kubernetes.io/projected/2d4611de-0934-450c-a51e-67298e455900-kube-api-access-znblt\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.068438 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-trusted-ca\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.068477 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c29792dd-faa1-4f0c-b405-e0de581ee26f-ca-trust-extracted\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.068483 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-audit-policies\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069053 4959 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d9aabc2-ed95-420b-afad-7af29c415329-config\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069216 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d14429e-3f90-4010-9b67-b0b6e81db122-serving-cert\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069327 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd4a69ec-a69e-45fa-8105-d50da9b41212-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069404 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0e736f-1d0f-4c59-ab45-b057dda052aa-config-volume\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069496 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvp97\" (UniqueName: \"kubernetes.io/projected/3a0e736f-1d0f-4c59-ab45-b057dda052aa-kube-api-access-jvp97\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069585 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069695 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3c910a54-aff5-4519-9a44-d17cc6001208-srv-cert\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069739 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3ce471aa-c701-4901-b0ed-c66d86cd0059-audit-policies\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069767 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkgqm\" (UniqueName: 
\"kubernetes.io/projected/06bff249-845d-4278-a2e5-a2a7c54c2f41-kube-api-access-jkgqm\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069809 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-config\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069828 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d9aabc2-ed95-420b-afad-7af29c415329-config\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069837 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-csi-data-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069917 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxtzt\" (UniqueName: \"kubernetes.io/projected/7cef5bdc-f62b-4872-bc40-59765c1faa27-kube-api-access-qxtzt\") pod \"ingress-canary-977m5\" (UID: \"7cef5bdc-f62b-4872-bc40-59765c1faa27\") " pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069952 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d4611de-0934-450c-a51e-67298e455900-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.069980 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-config\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.070015 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.070862 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b26918d7-cc7c-4925-8e2f-ba17e60177d5-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-z8frl\" (UID: 
\"b26918d7-cc7c-4925-8e2f-ba17e60177d5\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.072553 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-serving-cert\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.080979 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.109220 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.121601 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.161460 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.171517 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.171829 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/62ef9da5-17d7-4dab-9868-9e7c7694b799-config-volume\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.171867 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2d4611de-0934-450c-a51e-67298e455900-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.171888 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0e736f-1d0f-4c59-ab45-b057dda052aa-secret-volume\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.171920 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-registration-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.171942 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/5013fa35-7674-41bd-83a4-d01ff22253e9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k7vzj\" (UID: \"5013fa35-7674-41bd-83a4-d01ff22253e9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.171999 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.671955773 +0000 UTC m=+144.117862156 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172078 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/994c73fc-b24b-4cef-8145-561cc1dca4c7-trusted-ca\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172143 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-oauth-serving-cert\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172173 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83cc5140-ea3b-4939-a8c8-46566dea8c2d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172210 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-config\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172236 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-ca\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172263 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdgtk\" (UniqueName: \"kubernetes.io/projected/5013fa35-7674-41bd-83a4-d01ff22253e9-kube-api-access-fdgtk\") pod \"multus-admission-controller-857f4d67dd-k7vzj\" (UID: \"5013fa35-7674-41bd-83a4-d01ff22253e9\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172295 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-config\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172339 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d2d96b34-2e44-4d18-a591-2c286c762bf9-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-884qg\" (UID: \"d2d96b34-2e44-4d18-a591-2c286c762bf9\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172373 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-client\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172404 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjmsw\" (UniqueName: \"kubernetes.io/projected/0aedd299-b681-4f9f-b92e-d2bf27be7d06-kube-api-access-qjmsw\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172433 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6zgs\" (UniqueName: \"kubernetes.io/projected/62ef9da5-17d7-4dab-9868-9e7c7694b799-kube-api-access-t6zgs\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172464 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-stats-auth\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172494 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-default-certificate\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172530 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172559 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-webhook-cert\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172584 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-config\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172610 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qg4f\" (UniqueName: \"kubernetes.io/projected/d2d96b34-2e44-4d18-a591-2c286c762bf9-kube-api-access-5qg4f\") pod \"control-plane-machine-set-operator-78cbb6b69f-884qg\" (UID: \"d2d96b34-2e44-4d18-a591-2c286c762bf9\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172636 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-config\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172665 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/42901316-efd2-4e0f-9505-f8b0e5cc676c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172685 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc5f4\" (UniqueName: \"kubernetes.io/projected/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-kube-api-access-hc5f4\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172705 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d5aae7df-27bd-4553-8367-c4b6c65906ec-signing-cabundle\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172744 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0673f696-f58d-4980-8858-3a9aa97eb9dc-config\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: \"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172764 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/3c4683ca-d036-458c-87ab-aaf398a5dbf5-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-mszk6\" (UID: \"3c4683ca-d036-458c-87ab-aaf398a5dbf5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172790 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172809 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r4c5\" (UniqueName: \"kubernetes.io/projected/3c4683ca-d036-458c-87ab-aaf398a5dbf5-kube-api-access-8r4c5\") pod \"package-server-manager-789f6589d5-mszk6\" (UID: \"3c4683ca-d036-458c-87ab-aaf398a5dbf5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172832 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-apiservice-cert\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172851 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-metrics-certs\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172886 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172909 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/dfb911c8-efb4-4973-954c-808a8e87f0fe-images\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172931 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dfb911c8-efb4-4973-954c-808a8e87f0fe-proxy-tls\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172955 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsbq2\" (UniqueName: \"kubernetes.io/projected/0518e394-58c0-41d8-9f03-db75743ff4a8-kube-api-access-xsbq2\") pod 
\"migrator-59844c95c7-nkcwd\" (UID: \"0518e394-58c0-41d8-9f03-db75743ff4a8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172980 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxxvw\" (UniqueName: \"kubernetes.io/projected/cd4a69ec-a69e-45fa-8105-d50da9b41212-kube-api-access-nxxvw\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173001 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-service-ca\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173023 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42901316-efd2-4e0f-9505-f8b0e5cc676c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173039 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-service-ca\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173058 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26g5c\" (UniqueName: \"kubernetes.io/projected/3c910a54-aff5-4519-9a44-d17cc6001208-kube-api-access-26g5c\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173076 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-tmpfs\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173099 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/06bff249-845d-4278-a2e5-a2a7c54c2f41-serving-cert\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173171 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbrrc\" (UniqueName: \"kubernetes.io/projected/994c73fc-b24b-4cef-8145-561cc1dca4c7-kube-api-access-hbrrc\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: 
I0128 15:19:20.173202 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkftf\" (UniqueName: \"kubernetes.io/projected/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-kube-api-access-lkftf\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173223 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-serving-cert\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173242 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55ppf\" (UniqueName: \"kubernetes.io/projected/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-kube-api-access-55ppf\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173267 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5tf7\" (UniqueName: \"kubernetes.io/projected/97400b4d-3097-4875-a5ec-08f867212a0e-kube-api-access-b5tf7\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173287 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8b131611-c7ac-4409-9f6f-6f63309e4c55-srv-cert\") pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173307 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0673f696-f58d-4980-8858-3a9aa97eb9dc-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: \"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173326 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-images\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173347 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97400b4d-3097-4875-a5ec-08f867212a0e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173371 4959 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-jw8xl\" (UniqueName: \"kubernetes.io/projected/d5aae7df-27bd-4553-8367-c4b6c65906ec-kube-api-access-jw8xl\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173400 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173418 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-ca\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173437 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vxxr\" (UniqueName: \"kubernetes.io/projected/83cc5140-ea3b-4939-a8c8-46566dea8c2d-kube-api-access-8vxxr\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173528 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-service-ca-bundle\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173564 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zp9s\" (UniqueName: \"kubernetes.io/projected/0673f696-f58d-4980-8858-3a9aa97eb9dc-kube-api-access-9zp9s\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: \"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173588 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-serving-cert\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173620 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-config\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173631 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/a215a092-653c-4a83-9901-e5094b2c0f12-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173661 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t69w\" (UniqueName: \"kubernetes.io/projected/a215a092-653c-4a83-9901-e5094b2c0f12-kube-api-access-2t69w\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173700 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbllz\" (UniqueName: \"kubernetes.io/projected/04f6a584-73bb-4e7d-9d8c-677b58c44944-kube-api-access-zbllz\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173730 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-plugins-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173758 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r975\" (UniqueName: \"kubernetes.io/projected/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-kube-api-access-2r975\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173783 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-oauth-config\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.172343 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-registration-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173809 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97400b4d-3097-4875-a5ec-08f867212a0e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173835 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ghv5\" (UniqueName: \"kubernetes.io/projected/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-kube-api-access-6ghv5\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: 
\"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173862 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dfb911c8-efb4-4973-954c-808a8e87f0fe-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173885 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-mountpoint-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173914 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e8823a3-69b2-4e0d-9c22-61c698970b38-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173936 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-trusted-ca-bundle\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173960 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.173988 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-client-ca\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174013 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83cc5140-ea3b-4939-a8c8-46566dea8c2d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174042 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d5aae7df-27bd-4553-8367-c4b6c65906ec-signing-key\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 
15:19:20.174128 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/cd4a69ec-a69e-45fa-8105-d50da9b41212-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174163 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glzgm\" (UniqueName: \"kubernetes.io/projected/8b131611-c7ac-4409-9f6f-6f63309e4c55-kube-api-access-glzgm\") pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174191 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-socket-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174221 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-config\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174235 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42901316-efd2-4e0f-9505-f8b0e5cc676c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174242 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-config\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174363 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/994c73fc-b24b-4cef-8145-561cc1dca4c7-trusted-ca\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.174482 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-oauth-serving-cert\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.174487 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-28 15:19:20.674473746 +0000 UTC m=+144.120380129 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175228 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0673f696-f58d-4980-8858-3a9aa97eb9dc-config\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: \"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175260 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-certs\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175367 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rrvg\" (UniqueName: \"kubernetes.io/projected/d2b82dee-9207-4cad-ad4f-364e0c680d4c-kube-api-access-7rrvg\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175408 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znblt\" (UniqueName: \"kubernetes.io/projected/2d4611de-0934-450c-a51e-67298e455900-kube-api-access-znblt\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175420 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-config\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175453 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175438 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-trusted-ca\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175517 4959 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d14429e-3f90-4010-9b67-b0b6e81db122-serving-cert\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175544 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd4a69ec-a69e-45fa-8105-d50da9b41212-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175565 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0e736f-1d0f-4c59-ab45-b057dda052aa-config-volume\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175582 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-trusted-ca-bundle\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175592 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvp97\" (UniqueName: \"kubernetes.io/projected/3a0e736f-1d0f-4c59-ab45-b057dda052aa-kube-api-access-jvp97\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175650 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175676 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3c910a54-aff5-4519-9a44-d17cc6001208-srv-cert\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175713 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkgqm\" (UniqueName: \"kubernetes.io/projected/06bff249-845d-4278-a2e5-a2a7c54c2f41-kube-api-access-jkgqm\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175739 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-config\") pod \"console-f9d7485db-b7ncr\" (UID: 
\"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.175842 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-csi-data-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176053 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxtzt\" (UniqueName: \"kubernetes.io/projected/7cef5bdc-f62b-4872-bc40-59765c1faa27-kube-api-access-qxtzt\") pod \"ingress-canary-977m5\" (UID: \"7cef5bdc-f62b-4872-bc40-59765c1faa27\") " pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176079 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d4611de-0934-450c-a51e-67298e455900-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176106 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-config\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176148 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176167 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-client-ca\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176190 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c2d3b29-3f92-4a16-9a53-0e98fb387802-serving-cert\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176213 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lkzr\" (UniqueName: \"kubernetes.io/projected/6c2d3b29-3f92-4a16-9a53-0e98fb387802-kube-api-access-9lkzr\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176305 4959 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-node-bootstrap-token\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176362 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfgns\" (UniqueName: \"kubernetes.io/projected/dfb911c8-efb4-4973-954c-808a8e87f0fe-kube-api-access-jfgns\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176402 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/994c73fc-b24b-4cef-8145-561cc1dca4c7-config\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176447 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-metrics-tls\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176472 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vp5q\" (UniqueName: \"kubernetes.io/projected/6d14429e-3f90-4010-9b67-b0b6e81db122-kube-api-access-8vp5q\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176772 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2d4611de-0934-450c-a51e-67298e455900-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.177715 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-config\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.177791 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d4611de-0934-450c-a51e-67298e455900-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.177921 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-csi-data-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " 
pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.178139 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.179092 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/994c73fc-b24b-4cef-8145-561cc1dca4c7-config\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.179558 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83cc5140-ea3b-4939-a8c8-46566dea8c2d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.179598 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-images\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.179525 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-config\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.179874 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.179990 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-service-ca\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.176231 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-plugins-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180271 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-client\") pod \"etcd-operator-b45778765-ddn7x\" 
(UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180363 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/cd4a69ec-a69e-45fa-8105-d50da9b41212-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180515 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c2d3b29-3f92-4a16-9a53-0e98fb387802-service-ca-bundle\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180540 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-tmpfs\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180750 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d14429e-3f90-4010-9b67-b0b6e81db122-serving-cert\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180836 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd4a69ec-a69e-45fa-8105-d50da9b41212-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180869 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0aedd299-b681-4f9f-b92e-d2bf27be7d06-service-ca-bundle\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180910 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e8823a3-69b2-4e0d-9c22-61c698970b38-config\") pod \"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180925 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-mountpoint-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.180999 4959 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62ef9da5-17d7-4dab-9868-9e7c7694b799-metrics-tls\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181024 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7cef5bdc-f62b-4872-bc40-59765c1faa27-cert\") pod \"ingress-canary-977m5\" (UID: \"7cef5bdc-f62b-4872-bc40-59765c1faa27\") " pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181054 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8b131611-c7ac-4409-9f6f-6f63309e4c55-profile-collector-cert\") pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181080 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a215a092-653c-4a83-9901-e5094b2c0f12-proxy-tls\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181134 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3c910a54-aff5-4519-9a44-d17cc6001208-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181214 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/994c73fc-b24b-4cef-8145-561cc1dca4c7-serving-cert\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181252 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-config\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181442 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e8823a3-69b2-4e0d-9c22-61c698970b38-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181659 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83cc5140-ea3b-4939-a8c8-46566dea8c2d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.181788 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d2b82dee-9207-4cad-ad4f-364e0c680d4c-socket-dir\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.182237 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/6d14429e-3f90-4010-9b67-b0b6e81db122-etcd-service-ca\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.182393 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/06bff249-845d-4278-a2e5-a2a7c54c2f41-serving-cert\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.182582 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.182682 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dfb911c8-efb4-4973-954c-808a8e87f0fe-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.183638 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a215a092-653c-4a83-9901-e5094b2c0f12-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.183825 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd4a69ec-a69e-45fa-8105-d50da9b41212-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.184340 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.184590 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0673f696-f58d-4980-8858-3a9aa97eb9dc-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: 
\"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.184955 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/994c73fc-b24b-4cef-8145-561cc1dca4c7-serving-cert\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.185486 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-oauth-config\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.186546 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c2d3b29-3f92-4a16-9a53-0e98fb387802-serving-cert\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.187953 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-serving-cert\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.201520 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.219732 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8b131611-c7ac-4409-9f6f-6f63309e4c55-srv-cert\") pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.221003 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.224555 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0e736f-1d0f-4c59-ab45-b057dda052aa-secret-volume\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.225684 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8b131611-c7ac-4409-9f6f-6f63309e4c55-profile-collector-cert\") pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.225772 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/3c910a54-aff5-4519-9a44-d17cc6001208-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.241037 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.262327 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.281202 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.283033 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.283275 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.783239501 +0000 UTC m=+144.229145894 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.284182 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.284696 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.784685627 +0000 UTC m=+144.230592020 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.302080 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.312816 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-serving-cert\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.321857 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.333918 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-config\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.340842 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.360725 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.381518 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.385980 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.386296 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.886258483 +0000 UTC m=+144.332164906 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.387260 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.387833 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.887810832 +0000 UTC m=+144.333717225 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.401387 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.422053 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.436270 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e8823a3-69b2-4e0d-9c22-61c698970b38-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.442001 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.446089 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5013fa35-7674-41bd-83a4-d01ff22253e9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k7vzj\" (UID: \"5013fa35-7674-41bd-83a4-d01ff22253e9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.461129 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.462789 4959 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e8823a3-69b2-4e0d-9c22-61c698970b38-config\") pod \"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.481827 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.489154 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.489371 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.989335948 +0000 UTC m=+144.435242331 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.490296 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.490669 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:20.990658911 +0000 UTC m=+144.436565294 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.510354 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.517704 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-trusted-ca\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.521006 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.540460 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.555905 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-metrics-tls\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.561890 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.580903 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.591604 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.591735 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.091704235 +0000 UTC m=+144.537610618 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.592919 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.593523 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.093503639 +0000 UTC m=+144.539410022 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.601953 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.608189 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d5aae7df-27bd-4553-8367-c4b6c65906ec-signing-key\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.621517 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.639468 4959 request.go:700] Waited for 1.015735273s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca/configmaps?fieldSelector=metadata.name%3Dsigning-cabundle&limit=500&resourceVersion=0 Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.642083 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.645240 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d5aae7df-27bd-4553-8367-c4b6c65906ec-signing-cabundle\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.661851 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 28 15:19:20 crc 
kubenswrapper[4959]: I0128 15:19:20.681325 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.693667 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.694246 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.194208305 +0000 UTC m=+144.640114698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.694563 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.694950 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.194932374 +0000 UTC m=+144.640838757 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.701232 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.720434 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.754733 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgvdr\" (UniqueName: \"kubernetes.io/projected/16e045df-c42a-4f79-9bbc-7504250fdb81-kube-api-access-tgvdr\") pod \"openshift-config-operator-7777fb866f-kbz6d\" (UID: \"16e045df-c42a-4f79-9bbc-7504250fdb81\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.761039 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.767739 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/dfb911c8-efb4-4973-954c-808a8e87f0fe-images\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.780661 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.795615 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.795996 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.295757303 +0000 UTC m=+144.741663696 (durationBeforeRetry 500ms). 
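
Each failed operation above is re-queued by nestedpendingoperations.go with a "No retries permitted until ..." deadline; the kubelet keeps per-operation exponential backoff state, and every entry in this excerpt still reports the initial 500ms step (durationBeforeRetry 500ms). A rough sketch of that per-key bookkeeping follows; the names, doubling factor, and cap are assumptions for illustration, not the kubelet's exact constants:

package main

import (
	"fmt"
	"time"
)

// backoff tracks retry state for one operation key (e.g. a volume name),
// loosely modeled on the kubelet's exponential backoff for volume
// operations. initialDelay/factor/maxDelay are illustrative values.
type backoff struct {
	lastError time.Time
	delay     time.Duration
}

const (
	initialDelay = 500 * time.Millisecond
	factor       = 2
	maxDelay     = 2 * time.Minute
)

// recordError bumps the delay after a failure, producing the
// "durationBeforeRetry" value reported in the log.
func (b *backoff) recordError(now time.Time) {
	if b.delay == 0 {
		b.delay = initialDelay
	} else {
		b.delay *= factor
		if b.delay > maxDelay {
			b.delay = maxDelay
		}
	}
	b.lastError = now
}

// retryAllowedAt is the "No retries permitted until ..." timestamp.
func (b *backoff) retryAllowedAt() time.Time { return b.lastError.Add(b.delay) }

func main() {
	var b backoff
	now := time.Now()
	for i := 0; i < 4; i++ {
		b.recordError(now)
		fmt.Printf("attempt %d failed; no retries permitted until %s (durationBeforeRetry %s)\n",
			i+1, b.retryAllowedAt().Format(time.RFC3339Nano), b.delay)
		now = b.retryAllowedAt()
	}
}

That this log shows a constant 500ms rather than a growing delay suggests each reported failure is still at the first backoff step for its operation key; the sketch only models the general shape, not the kubelet's exact keying.
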
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.796312 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.796781 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.296770827 +0000 UTC m=+144.742677210 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.800958 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.804577 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.809200 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dfb911c8-efb4-4973-954c-808a8e87f0fe-proxy-tls\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.835171 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7w76\" (UniqueName: \"kubernetes.io/projected/3df41cde-0e93-462b-8391-cbb0dcf6ea4a-kube-api-access-h7w76\") pod \"apiserver-76f77b778f-n5pdh\" (UID: \"3df41cde-0e93-462b-8391-cbb0dcf6ea4a\") " pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.855677 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ncb9\" (UniqueName: \"kubernetes.io/projected/29b47bb2-f090-43a4-b2ea-7bb83b683efb-kube-api-access-8ncb9\") pod \"oauth-openshift-558db77b4-nk6xq\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.875499 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghqx2\" (UniqueName: \"kubernetes.io/projected/e84ed88e-eaf6-433c-b930-93f13ed09fcf-kube-api-access-ghqx2\") pod \"downloads-7954f5f757-gqkxc\" (UID: \"e84ed88e-eaf6-433c-b930-93f13ed09fcf\") " pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.880883 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.897734 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.898327 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.398283273 +0000 UTC m=+144.844189836 (durationBeforeRetry 500ms). 
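
The request.go entry a little earlier ("Waited for 1.015735273s due to client-side throttling, not priority and fairness") is the kubelet's own client-side rate limiter delaying an API request before it is ever sent: client-go passes each request through a token bucket and logs the wait when it exceeds a threshold. A self-contained sketch of that token-bucket behavior using golang.org/x/time/rate (the QPS and burst numbers below are illustrative client-go-style defaults, not values read from this cluster's configuration; the module golang.org/x/time must be in your go.mod):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Token bucket: 5 requests/second sustained, bursts of 10.
	// client-go's default rate limiter behaves like this; the exact
	// numbers for the kubelet in this log are an assumption.
	limiter := rate.NewLimiter(rate.Limit(5), 10)

	for i := 0; i < 15; i++ {
		start := time.Now()
		if err := limiter.Wait(context.Background()); err != nil {
			fmt.Println("wait aborted:", err)
			return
		}
		if waited := time.Since(start); waited > 50*time.Millisecond {
			// Analogous to request.go's "Waited for ... due to
			// client-side throttling, not priority and fairness".
			fmt.Printf("request %d: waited %s due to client-side throttling\n", i, waited)
		} else {
			fmt.Printf("request %d: sent immediately\n", i)
		}
	}
}

The "not priority and fairness" clause in the log distinguishes this local wait from server-side APF queuing: during a mass pod startup like this one, the kubelet generates bursts of GETs and token requests, and the local bucket is what delays them.
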
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.898573 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:20 crc kubenswrapper[4959]: E0128 15:19:20.899146 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.399091563 +0000 UTC m=+144.844997946 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.900735 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.907185 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c4683ca-d036-458c-87ab-aaf398a5dbf5-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-mszk6\" (UID: \"3c4683ca-d036-458c-87ab-aaf398a5dbf5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.920613 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.928489 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42901316-efd2-4e0f-9505-f8b0e5cc676c-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.932986 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.941783 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.946679 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0e736f-1d0f-4c59-ab45-b057dda052aa-config-volume\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.953961 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.961013 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.976294 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d"] Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.981135 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 28 15:19:20 crc kubenswrapper[4959]: I0128 15:19:20.987901 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42901316-efd2-4e0f-9505-f8b0e5cc676c-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.000380 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.001088 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.501047419 +0000 UTC m=+144.946953812 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.001382 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.022202 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.040670 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.060931 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.068977 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-metrics-certs\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.072710 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.081353 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.085728 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0aedd299-b681-4f9f-b92e-d2bf27be7d06-service-ca-bundle\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.101291 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.104870 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.105689 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.605657222 +0000 UTC m=+145.051563605 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.112733 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-default-certificate\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.122314 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.124706 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-n5pdh"] Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.132269 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/0aedd299-b681-4f9f-b92e-d2bf27be7d06-stats-auth\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.140845 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.162056 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.169966 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3c910a54-aff5-4519-9a44-d17cc6001208-srv-cert\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.172124 4959 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.172199 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/62ef9da5-17d7-4dab-9868-9e7c7694b799-config-volume podName:62ef9da5-17d7-4dab-9868-9e7c7694b799 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.672179764 +0000 UTC m=+145.118086147 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/62ef9da5-17d7-4dab-9868-9e7c7694b799-config-volume") pod "dns-default-886ph" (UID: "62ef9da5-17d7-4dab-9868-9e7c7694b799") : failed to sync configmap cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.174319 4959 secret.go:188] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.174353 4959 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.174344 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-nk6xq"] Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.174401 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d2d96b34-2e44-4d18-a591-2c286c762bf9-control-plane-machine-set-operator-tls podName:d2d96b34-2e44-4d18-a591-2c286c762bf9 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.674381719 +0000 UTC m=+145.120288102 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/d2d96b34-2e44-4d18-a591-2c286c762bf9-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-78cbb6b69f-884qg" (UID: "d2d96b34-2e44-4d18-a591-2c286c762bf9") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.174422 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-apiservice-cert podName:f12f8c01-cc1c-4a38-b234-e18bb9ff00d7 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.674414059 +0000 UTC m=+145.120320442 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-apiservice-cert") pod "packageserver-d55dfcdfc-6slvt" (UID: "f12f8c01-cc1c-4a38-b234-e18bb9ff00d7") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.174427 4959 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.174571 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-webhook-cert podName:f12f8c01-cc1c-4a38-b234-e18bb9ff00d7 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.674535932 +0000 UTC m=+145.120442485 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-webhook-cert") pod "packageserver-d55dfcdfc-6slvt" (UID: "f12f8c01-cc1c-4a38-b234-e18bb9ff00d7") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.175559 4959 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.175606 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-certs podName:04f6a584-73bb-4e7d-9d8c-677b58c44944 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.675597419 +0000 UTC m=+145.121503792 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-certs") pod "machine-config-server-9s7sv" (UID: "04f6a584-73bb-4e7d-9d8c-677b58c44944") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.179798 4959 secret.go:188] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.179895 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/97400b4d-3097-4875-a5ec-08f867212a0e-serving-cert podName:97400b4d-3097-4875-a5ec-08f867212a0e nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.679875654 +0000 UTC m=+145.125782027 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/97400b4d-3097-4875-a5ec-08f867212a0e-serving-cert") pod "kube-storage-version-migrator-operator-b67b599dd-jm2jv" (UID: "97400b4d-3097-4875-a5ec-08f867212a0e") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: W0128 15:19:21.180723 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29b47bb2_f090_43a4_b2ea_7bb83b683efb.slice/crio-60250782405c8849376c3fa6d599aed15dcabc0e342ead76d8044cbbc2319135 WatchSource:0}: Error finding container 60250782405c8849376c3fa6d599aed15dcabc0e342ead76d8044cbbc2319135: Status 404 returned error can't find the container with id 60250782405c8849376c3fa6d599aed15dcabc0e342ead76d8044cbbc2319135 Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.181414 4959 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.181477 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.181531 4959 configmap.go:193] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: failed to sync configmap cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.181626 4959 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 
15:19:21.181660 4959 secret.go:188] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.181997 4959 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.182496 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-node-bootstrap-token podName:04f6a584-73bb-4e7d-9d8c-677b58c44944 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.681447843 +0000 UTC m=+145.127354226 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-node-bootstrap-token") pod "machine-config-server-9s7sv" (UID: "04f6a584-73bb-4e7d-9d8c-677b58c44944") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.182536 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/97400b4d-3097-4875-a5ec-08f867212a0e-config podName:97400b4d-3097-4875-a5ec-08f867212a0e nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.68252442 +0000 UTC m=+145.128430803 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/97400b4d-3097-4875-a5ec-08f867212a0e-config") pod "kube-storage-version-migrator-operator-b67b599dd-jm2jv" (UID: "97400b4d-3097-4875-a5ec-08f867212a0e") : failed to sync configmap cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.182560 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7cef5bdc-f62b-4872-bc40-59765c1faa27-cert podName:7cef5bdc-f62b-4872-bc40-59765c1faa27 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.682550341 +0000 UTC m=+145.128456714 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7cef5bdc-f62b-4872-bc40-59765c1faa27-cert") pod "ingress-canary-977m5" (UID: "7cef5bdc-f62b-4872-bc40-59765c1faa27") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.182581 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a215a092-653c-4a83-9901-e5094b2c0f12-proxy-tls podName:a215a092-653c-4a83-9901-e5094b2c0f12 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.682573251 +0000 UTC m=+145.128479634 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/a215a092-653c-4a83-9901-e5094b2c0f12-proxy-tls") pod "machine-config-controller-84d6567774-9f8vj" (UID: "a215a092-653c-4a83-9901-e5094b2c0f12") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.182598 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/62ef9da5-17d7-4dab-9868-9e7c7694b799-metrics-tls podName:62ef9da5-17d7-4dab-9868-9e7c7694b799 nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.682592152 +0000 UTC m=+145.128498535 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/62ef9da5-17d7-4dab-9868-9e7c7694b799-metrics-tls") pod "dns-default-886ph" (UID: "62ef9da5-17d7-4dab-9868-9e7c7694b799") : failed to sync secret cache: timed out waiting for the condition Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.201552 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.206563 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.206747 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.706717367 +0000 UTC m=+145.152623760 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.207295 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.207686 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.707675521 +0000 UTC m=+145.153582084 (durationBeforeRetry 500ms). 
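
The burst of "failed to sync secret cache: timed out waiting for the condition" and "failed to sync configmap cache: ..." errors above means volume SetUp ran before the kubelet's informer caches for those Secrets and ConfigMaps had filled; each mount is retried and succeeds once the matching "Caches populated for *v1.Secret ..." entry appears. Below is a minimal, dependency-free sketch of waiting on a cache-synced signal with a timeout, hand-rolled rather than using client-go's cache.WaitForCacheSync:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// errTimedOut mirrors the wording seen in the log.
var errTimedOut = errors.New("timed out waiting for the condition")

// waitForSync polls hasSynced until it reports true or the timeout
// elapses; this is the shape of the check behind
// "failed to sync secret cache: timed out waiting for the condition".
func waitForSync(hasSynced func() bool, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for !hasSynced() {
		if time.Now().After(deadline) {
			return errTimedOut
		}
		time.Sleep(10 * time.Millisecond)
	}
	return nil
}

func main() {
	var populated atomic.Bool
	// Simulate a reflector filling the cache after 50ms, as in the
	// "Caches populated for *v1.Secret ..." entries.
	go func() {
		time.Sleep(50 * time.Millisecond)
		populated.Store(true)
	}()

	// Too-short timeout: reproduces the error wording from the log.
	if err := waitForSync(populated.Load, 5*time.Millisecond); err != nil {
		fmt.Println("Couldn't get secret: failed to sync secret cache:", err)
	}
	// Generous timeout: succeeds once the cache is populated.
	if err := waitForSync(populated.Load, time.Second); err == nil {
		fmt.Println("cache synced; MountVolume.SetUp can be retried")
	}
}

This is why the failures here are transient: the interleaved reflector.go "Caches populated" lines show the informers catching up, after which the re-queued SetUp operations (visible further down) succeed.
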
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.220761 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.241248 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.241931 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-gqkxc"] Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.259633 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" event={"ID":"29b47bb2-f090-43a4-b2ea-7bb83b683efb","Type":"ContainerStarted","Data":"60250782405c8849376c3fa6d599aed15dcabc0e342ead76d8044cbbc2319135"} Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.260831 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.262792 4959 generic.go:334] "Generic (PLEG): container finished" podID="16e045df-c42a-4f79-9bbc-7504250fdb81" containerID="2791e5fa21c11e7f05b4221eea89be562f9f4e236d1679a98362849e830e454c" exitCode=0 Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.262840 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" event={"ID":"16e045df-c42a-4f79-9bbc-7504250fdb81","Type":"ContainerDied","Data":"2791e5fa21c11e7f05b4221eea89be562f9f4e236d1679a98362849e830e454c"} Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.262856 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" event={"ID":"16e045df-c42a-4f79-9bbc-7504250fdb81","Type":"ContainerStarted","Data":"8a1856b0b004ae10b7c5c72dd32b07f767225cb5b44030b75ce8848d17237fbc"} Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.263931 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" event={"ID":"3df41cde-0e93-462b-8391-cbb0dcf6ea4a","Type":"ContainerStarted","Data":"553dffb8eb4aca590b057c3a796cc55268b92119dc9ae206eb6b9796c531fbf9"} Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.280686 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.300879 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.308669 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.308798 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.808770606 +0000 UTC m=+145.254676989 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.309196 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.309781 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.809769151 +0000 UTC m=+145.255675534 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.320309 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.340875 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.360398 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.383886 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.400318 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.409542 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.410850 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.411142 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:21.911095802 +0000 UTC m=+145.357002185 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.411626 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.411956 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-28 15:19:21.911943163 +0000 UTC m=+145.357849546 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.420202 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.441180 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.461722 4959 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.481398 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.500997 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.512195 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.512410 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.012375192 +0000 UTC m=+145.458281565 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.512933 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.513394 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.013374497 +0000 UTC m=+145.459280880 (durationBeforeRetry 500ms). 
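
The "SyncLoop (PLEG)" entries above (ContainerStarted, ContainerDied, "Generic (PLEG): container finished") are the Pod Lifecycle Event Generator feeding container state changes from the runtime into the kubelet's main sync loop over a channel. A stripped-down sketch of that producer/consumer shape, with field names matching what the log prints but the types otherwise invented for illustration:

package main

import "fmt"

// podLifecycleEvent mirrors the fields visible in the log's PLEG entries.
type podLifecycleEvent struct {
	PodID string // pod UID
	Type  string // "ContainerStarted", "ContainerDied", ...
	Data  string // container or sandbox ID
}

// relist would normally diff two snapshots of runtime state; here it just
// emits the two events recorded in this excerpt.
func relist(ch chan<- podLifecycleEvent) {
	ch <- podLifecycleEvent{"16e045df-c42a-4f79-9bbc-7504250fdb81", "ContainerDied",
		"2791e5fa21c11e7f05b4221eea89be562f9f4e236d1679a98362849e830e454c"}
	ch <- podLifecycleEvent{"3df41cde-0e93-462b-8391-cbb0dcf6ea4a", "ContainerStarted",
		"553dffb8eb4aca590b057c3a796cc55268b92119dc9ae206eb6b9796c531fbf9"}
	close(ch)
}

func main() {
	events := make(chan podLifecycleEvent, 16)
	go relist(events)

	// The sync loop consumes PLEG events and triggers a pod sync for
	// each, producing the "SyncLoop (PLEG): event for pod" lines.
	for e := range events {
		fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %s\n", e.PodID, e.Type, e.Data)
	}
}

The ContainerDied with exitCode=0 for openshift-config-operator is an init-style container completing normally, not a crash; the subsequent ContainerStarted events show the pods progressing despite the unrelated CSI retries.
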
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.521580 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.541635 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.562145 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.581862 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.605795 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.614829 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.615571 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.115536599 +0000 UTC m=+145.561442992 (durationBeforeRetry 500ms). 
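
The alternating "operationExecutor.UnmountVolume started" / "operationExecutor.MountVolume started" pairs for pvc-657094db-... throughout this excerpt are the volume reconciler comparing its desired state (the PVC should be mounted for the new image-registry pod c29792dd-...) against its actual state (it is still mounted for the terminated pod 8f668bae-...) and kicking off both operations on every pass; both then fail on the unregistered driver and are re-queued. A compact sketch of that reconcile pass over two sets, with deliberately simplified types:

package main

import "fmt"

// mount identifies a volume mounted (or wanted) for a specific pod.
type mount struct{ volume, podUID string }

// reconcile emits unmounts for mounts no longer desired and mounts for
// desired entries not yet present — the pattern reconciler_common.go
// logs on each pass.
func reconcile(actual, desired map[mount]bool) {
	for m := range actual {
		if !desired[m] {
			fmt.Printf("operationExecutor.UnmountVolume started for volume %q pod %q\n", m.volume, m.podUID)
		}
	}
	for m := range desired {
		if !actual[m] {
			fmt.Printf("operationExecutor.MountVolume started for volume %q pod %q\n", m.volume, m.podUID)
		}
	}
}

func main() {
	pvc := "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
	// Actual state: still mounted for the old, deleted pod.
	actual := map[mount]bool{{pvc, "8f668bae-612b-4b75-9490-919e737c6a3b"}: true}
	// Desired state: wanted by the replacement image-registry pod.
	desired := map[mount]bool{{pvc, "c29792dd-faa1-4f0c-b405-e0de581ee26f"}: true}
	reconcile(actual, desired)
}

Because the reconciler re-derives both operations on every pass, the same two errors repeat roughly every 100ms here until the hostpath-provisioner driver registers, at which point the teardown for the old pod and the mount for the new one can finally both proceed.
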
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.620637 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.639775 4959 request.go:700] Waited for 1.581227243s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/serviceaccounts/registry/token Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.663416 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-bound-sa-token\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.690158 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9njd\" (UniqueName: \"kubernetes.io/projected/3ce471aa-c701-4901-b0ed-c66d86cd0059-kube-api-access-s9njd\") pod \"apiserver-7bbb656c7d-chhqr\" (UID: \"3ce471aa-c701-4901-b0ed-c66d86cd0059\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.714236 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cl5bx\" (UniqueName: \"kubernetes.io/projected/6d9aabc2-ed95-420b-afad-7af29c415329-kube-api-access-cl5bx\") pod \"machine-approver-56656f9798-2sdcc\" (UID: \"6d9aabc2-ed95-420b-afad-7af29c415329\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717373 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97400b4d-3097-4875-a5ec-08f867212a0e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717448 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-certs\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717583 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-node-bootstrap-token\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717638 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62ef9da5-17d7-4dab-9868-9e7c7694b799-metrics-tls\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717660 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7cef5bdc-f62b-4872-bc40-59765c1faa27-cert\") pod \"ingress-canary-977m5\" (UID: \"7cef5bdc-f62b-4872-bc40-59765c1faa27\") " pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717691 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a215a092-653c-4a83-9901-e5094b2c0f12-proxy-tls\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717714 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/62ef9da5-17d7-4dab-9868-9e7c7694b799-config-volume\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717762 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d2d96b34-2e44-4d18-a591-2c286c762bf9-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-884qg\" (UID: \"d2d96b34-2e44-4d18-a591-2c286c762bf9\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717813 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-webhook-cert\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.717884 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-apiservice-cert\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.718050 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97400b4d-3097-4875-a5ec-08f867212a0e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.718096 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: 
\"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.718486 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.21847053 +0000 UTC m=+145.664376913 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.719164 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97400b4d-3097-4875-a5ec-08f867212a0e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.720038 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/62ef9da5-17d7-4dab-9868-9e7c7694b799-config-volume\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.722282 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vszr\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-kube-api-access-2vszr\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.722615 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a215a092-653c-4a83-9901-e5094b2c0f12-proxy-tls\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.723242 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-certs\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.724590 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-apiservice-cert\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.726507 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/62ef9da5-17d7-4dab-9868-9e7c7694b799-metrics-tls\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.726680 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-webhook-cert\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.726682 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7cef5bdc-f62b-4872-bc40-59765c1faa27-cert\") pod \"ingress-canary-977m5\" (UID: \"7cef5bdc-f62b-4872-bc40-59765c1faa27\") " pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.727293 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/97400b4d-3097-4875-a5ec-08f867212a0e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.727767 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/04f6a584-73bb-4e7d-9d8c-677b58c44944-node-bootstrap-token\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.728272 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d2d96b34-2e44-4d18-a591-2c286c762bf9-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-884qg\" (UID: \"d2d96b34-2e44-4d18-a591-2c286c762bf9\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.742922 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6p74z\" (UniqueName: \"kubernetes.io/projected/97307cd7-d817-4da5-b2bc-adbb5f1406c8-kube-api-access-6p74z\") pod \"dns-operator-744455d44c-ld2wn\" (UID: \"97307cd7-d817-4da5-b2bc-adbb5f1406c8\") " pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.744401 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.756963 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgxjl\" (UniqueName: \"kubernetes.io/projected/b26918d7-cc7c-4925-8e2f-ba17e60177d5-kube-api-access-mgxjl\") pod \"cluster-samples-operator-665b6dd947-z8frl\" (UID: \"b26918d7-cc7c-4925-8e2f-ba17e60177d5\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.776463 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2whj7\" (UniqueName: \"kubernetes.io/projected/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-kube-api-access-2whj7\") pod \"route-controller-manager-6576b87f9c-bpvnw\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.869965 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.870074 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjmsw\" (UniqueName: \"kubernetes.io/projected/0aedd299-b681-4f9f-b92e-d2bf27be7d06-kube-api-access-qjmsw\") pod \"router-default-5444994796-5xm4l\" (UID: \"0aedd299-b681-4f9f-b92e-d2bf27be7d06\") " pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.871318 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.871846 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.872419 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.37240161 +0000 UTC m=+145.818307993 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.899941 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qg4f\" (UniqueName: \"kubernetes.io/projected/d2d96b34-2e44-4d18-a591-2c286c762bf9-kube-api-access-5qg4f\") pod \"control-plane-machine-set-operator-78cbb6b69f-884qg\" (UID: \"d2d96b34-2e44-4d18-a591-2c286c762bf9\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.900388 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdgtk\" (UniqueName: \"kubernetes.io/projected/5013fa35-7674-41bd-83a4-d01ff22253e9-kube-api-access-fdgtk\") pod \"multus-admission-controller-857f4d67dd-k7vzj\" (UID: \"5013fa35-7674-41bd-83a4-d01ff22253e9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.903468 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hd5gj\" (UID: \"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.909931 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6zgs\" (UniqueName: \"kubernetes.io/projected/62ef9da5-17d7-4dab-9868-9e7c7694b799-kube-api-access-t6zgs\") pod \"dns-default-886ph\" (UID: \"62ef9da5-17d7-4dab-9868-9e7c7694b799\") " pod="openshift-dns/dns-default-886ph" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.918733 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.924244 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r4c5\" (UniqueName: \"kubernetes.io/projected/3c4683ca-d036-458c-87ab-aaf398a5dbf5-kube-api-access-8r4c5\") pod \"package-server-manager-789f6589d5-mszk6\" (UID: \"3c4683ca-d036-458c-87ab-aaf398a5dbf5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.939666 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.943730 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.947144 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc5f4\" (UniqueName: \"kubernetes.io/projected/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-kube-api-access-hc5f4\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.961966 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/42901316-efd2-4e0f-9505-f8b0e5cc676c-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gxh2r\" (UID: \"42901316-efd2-4e0f-9505-f8b0e5cc676c\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.973441 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:21 crc kubenswrapper[4959]: E0128 15:19:21.974130 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.47409263 +0000 UTC m=+145.919999023 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.980976 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ghv5\" (UniqueName: \"kubernetes.io/projected/ce02fde1-7de1-4456-9fa4-6591a4b18b9c-kube-api-access-6ghv5\") pod \"machine-api-operator-5694c8668f-fphck\" (UID: \"ce02fde1-7de1-4456-9fa4-6591a4b18b9c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:21 crc kubenswrapper[4959]: I0128 15:19:21.998722 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glzgm\" (UniqueName: \"kubernetes.io/projected/8b131611-c7ac-4409-9f6f-6f63309e4c55-kube-api-access-glzgm\") pod \"catalog-operator-68c6474976-hkcxz\" (UID: \"8b131611-c7ac-4409-9f6f-6f63309e4c55\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.008673 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.010192 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.012559 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.017465 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rrvg\" (UniqueName: \"kubernetes.io/projected/d2b82dee-9207-4cad-ad4f-364e0c680d4c-kube-api-access-7rrvg\") pod \"csi-hostpathplugin-ctvhn\" (UID: \"d2b82dee-9207-4cad-ad4f-364e0c680d4c\") " pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.028970 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" Jan 28 15:19:22 crc kubenswrapper[4959]: W0128 15:19:22.029953 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ce471aa_c701_4901_b0ed_c66d86cd0059.slice/crio-ab64608efb31a57a005e25da44e764141d15b99424eea834da25528584d7ceef WatchSource:0}: Error finding container ab64608efb31a57a005e25da44e764141d15b99424eea834da25528584d7ceef: Status 404 returned error can't find the container with id ab64608efb31a57a005e25da44e764141d15b99424eea834da25528584d7ceef Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.036021 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znblt\" (UniqueName: \"kubernetes.io/projected/2d4611de-0934-450c-a51e-67298e455900-kube-api-access-znblt\") pod \"marketplace-operator-79b997595-plqj9\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.062164 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvp97\" (UniqueName: \"kubernetes.io/projected/3a0e736f-1d0f-4c59-ab45-b057dda052aa-kube-api-access-jvp97\") pod \"collect-profiles-29493555-dn2f8\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.078862 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.079650 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.579625705 +0000 UTC m=+146.025532088 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.080194 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd4a69ec-a69e-45fa-8105-d50da9b41212-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.085584 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.095573 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1cc89ccf-f7d4-4d82-abf9-088db33dbb05-bound-sa-token\") pod \"ingress-operator-5b745b69d9-nf28j\" (UID: \"1cc89ccf-f7d4-4d82-abf9-088db33dbb05\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.102622 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.107630 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-886ph" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.113154 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.121212 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.129961 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkgqm\" (UniqueName: \"kubernetes.io/projected/06bff249-845d-4278-a2e5-a2a7c54c2f41-kube-api-access-jkgqm\") pod \"controller-manager-879f6c89f-2hr5x\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.135399 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsbq2\" (UniqueName: \"kubernetes.io/projected/0518e394-58c0-41d8-9f03-db75743ff4a8-kube-api-access-xsbq2\") pod \"migrator-59844c95c7-nkcwd\" (UID: \"0518e394-58c0-41d8-9f03-db75743ff4a8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.141429 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:22 crc kubenswrapper[4959]: W0128 15:19:22.154182 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ed3dbaa_b8b3_4455_9f22_3d04817ee1a7.slice/crio-3a88b05fb2f139fba957ce783d74a6464462bbd98677776b6fb34e5d8ed52c9d WatchSource:0}: Error finding container 3a88b05fb2f139fba957ce783d74a6464462bbd98677776b6fb34e5d8ed52c9d: Status 404 returned error can't find the container with id 3a88b05fb2f139fba957ce783d74a6464462bbd98677776b6fb34e5d8ed52c9d Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.163926 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkftf\" (UniqueName: \"kubernetes.io/projected/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-kube-api-access-lkftf\") pod \"console-f9d7485db-b7ncr\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.181732 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.182778 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.68274636 +0000 UTC m=+146.128652743 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.183587 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.189551 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r975\" (UniqueName: \"kubernetes.io/projected/f12f8c01-cc1c-4a38-b234-e18bb9ff00d7-kube-api-access-2r975\") pod \"packageserver-d55dfcdfc-6slvt\" (UID: \"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.193092 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.196983 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.206289 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxtzt\" (UniqueName: \"kubernetes.io/projected/7cef5bdc-f62b-4872-bc40-59765c1faa27-kube-api-access-qxtzt\") pod \"ingress-canary-977m5\" (UID: \"7cef5bdc-f62b-4872-bc40-59765c1faa27\") " pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.216823 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfgns\" (UniqueName: \"kubernetes.io/projected/dfb911c8-efb4-4973-954c-808a8e87f0fe-kube-api-access-jfgns\") pod \"machine-config-operator-74547568cd-dmmbc\" (UID: \"dfb911c8-efb4-4973-954c-808a8e87f0fe\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.226983 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.235221 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:22 crc kubenswrapper[4959]: W0128 15:19:22.237636 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18c4c5d0_14c0_4ca4_ad3c_e521ab20ff61.slice/crio-272f6db37b376dcaa31cc1e3923f065fdb0a903f562f71ab3774465a8c600394 WatchSource:0}: Error finding container 272f6db37b376dcaa31cc1e3923f065fdb0a903f562f71ab3774465a8c600394: Status 404 returned error can't find the container with id 272f6db37b376dcaa31cc1e3923f065fdb0a903f562f71ab3774465a8c600394 Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.242943 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55ppf\" (UniqueName: \"kubernetes.io/projected/ad1ad920-e620-4cd9-a1dd-3773290fbfc3-kube-api-access-55ppf\") pod \"service-ca-operator-777779d784-ljp65\" (UID: \"ad1ad920-e620-4cd9-a1dd-3773290fbfc3\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.252342 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.256351 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vp5q\" (UniqueName: \"kubernetes.io/projected/6d14429e-3f90-4010-9b67-b0b6e81db122-kube-api-access-8vp5q\") pod \"etcd-operator-b45778765-ddn7x\" (UID: \"6d14429e-3f90-4010-9b67-b0b6e81db122\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.268333 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.283715 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.284251 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.784230466 +0000 UTC m=+146.230136849 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.284164 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.285078 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.285345 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxxvw\" (UniqueName: \"kubernetes.io/projected/cd4a69ec-a69e-45fa-8105-d50da9b41212-kube-api-access-nxxvw\") pod \"cluster-image-registry-operator-dc59b4c8b-fv8nc\" (UID: \"cd4a69ec-a69e-45fa-8105-d50da9b41212\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.285699 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.785684052 +0000 UTC m=+146.231590435 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.294294 4959 generic.go:334] "Generic (PLEG): container finished" podID="3df41cde-0e93-462b-8391-cbb0dcf6ea4a" containerID="551cfcdc43c1f195d2855bd98339f0131ce0974c24dfbb88363756554a077ffb" exitCode=0 Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.294360 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" event={"ID":"3df41cde-0e93-462b-8391-cbb0dcf6ea4a","Type":"ContainerDied","Data":"551cfcdc43c1f195d2855bd98339f0131ce0974c24dfbb88363756554a077ffb"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.298609 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.298797 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.307213 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zp9s\" (UniqueName: \"kubernetes.io/projected/0673f696-f58d-4980-8858-3a9aa97eb9dc-kube-api-access-9zp9s\") pod \"openshift-apiserver-operator-796bbdcf4f-w4dqm\" (UID: \"0673f696-f58d-4980-8858-3a9aa97eb9dc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.328209 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-5xm4l" event={"ID":"0aedd299-b681-4f9f-b92e-d2bf27be7d06","Type":"ContainerStarted","Data":"126b24b08f96bf5c3ee3b2b46fb4d88939f9f8114047718ba67abf7f6c7a18e6"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.331872 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t69w\" (UniqueName: \"kubernetes.io/projected/a215a092-653c-4a83-9901-e5094b2c0f12-kube-api-access-2t69w\") pod \"machine-config-controller-84d6567774-9f8vj\" (UID: \"a215a092-653c-4a83-9901-e5094b2c0f12\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.332215 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" event={"ID":"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61","Type":"ContainerStarted","Data":"272f6db37b376dcaa31cc1e3923f065fdb0a903f562f71ab3774465a8c600394"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.337143 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.340679 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.355079 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-977m5" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.357345 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.364565 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" event={"ID":"6d9aabc2-ed95-420b-afad-7af29c415329","Type":"ContainerStarted","Data":"d110b2efb4830af3d05d059a30bc99fd26fa5845f89fa6d1e1e679cc47429c2f"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.364628 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" event={"ID":"6d9aabc2-ed95-420b-afad-7af29c415329","Type":"ContainerStarted","Data":"95a6983b3bd5eac41f1c146e11e99a0adce68c4453d197d2da8a593f225925c0"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.380069 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lkzr\" (UniqueName: \"kubernetes.io/projected/6c2d3b29-3f92-4a16-9a53-0e98fb387802-kube-api-access-9lkzr\") pod \"authentication-operator-69f744f599-5vzvg\" (UID: \"6c2d3b29-3f92-4a16-9a53-0e98fb387802\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.381089 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbllz\" (UniqueName: \"kubernetes.io/projected/04f6a584-73bb-4e7d-9d8c-677b58c44944-kube-api-access-zbllz\") pod \"machine-config-server-9s7sv\" (UID: \"04f6a584-73bb-4e7d-9d8c-677b58c44944\") " pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.387249 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.391676 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.891638657 +0000 UTC m=+146.337545040 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.392659 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26g5c\" (UniqueName: \"kubernetes.io/projected/3c910a54-aff5-4519-9a44-d17cc6001208-kube-api-access-26g5c\") pod \"olm-operator-6b444d44fb-4qzbj\" (UID: \"3c910a54-aff5-4519-9a44-d17cc6001208\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.395490 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" event={"ID":"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7","Type":"ContainerStarted","Data":"3a88b05fb2f139fba957ce783d74a6464462bbd98677776b6fb34e5d8ed52c9d"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.400066 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.401210 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-9s7sv" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.401750 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-gqkxc" event={"ID":"e84ed88e-eaf6-433c-b930-93f13ed09fcf","Type":"ContainerStarted","Data":"9c86dde11baffef5192e034b43d7075397463d03af1f58cb31b0830d129fbe9c"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.401887 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.401906 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-gqkxc" event={"ID":"e84ed88e-eaf6-433c-b930-93f13ed09fcf","Type":"ContainerStarted","Data":"470ca5adefdeb5f80cec8378a80790779943c55801123f8544d6dd6f3c33d0ce"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.406549 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" event={"ID":"29b47bb2-f090-43a4-b2ea-7bb83b683efb","Type":"ContainerStarted","Data":"e754bf65fa3e302711fcc3cc917c91511b61384f2e777ba0c4b23ef1c80e4689"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.406819 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.406869 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.407314 
4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.407876 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vxxr\" (UniqueName: \"kubernetes.io/projected/83cc5140-ea3b-4939-a8c8-46566dea8c2d-kube-api-access-8vxxr\") pod \"openshift-controller-manager-operator-756b6f6bc6-qzhgw\" (UID: \"83cc5140-ea3b-4939-a8c8-46566dea8c2d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.411995 4959 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-nk6xq container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.38:6443/healthz\": dial tcp 10.217.0.38:6443: connect: connection refused" start-of-body= Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.412041 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.38:6443/healthz\": dial tcp 10.217.0.38:6443: connect: connection refused" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.416467 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw8xl\" (UniqueName: \"kubernetes.io/projected/d5aae7df-27bd-4553-8367-c4b6c65906ec-kube-api-access-jw8xl\") pod \"service-ca-9c57cc56f-zklvs\" (UID: \"d5aae7df-27bd-4553-8367-c4b6c65906ec\") " pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.418464 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" event={"ID":"3ce471aa-c701-4901-b0ed-c66d86cd0059","Type":"ContainerStarted","Data":"ab64608efb31a57a005e25da44e764141d15b99424eea834da25528584d7ceef"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.421815 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" event={"ID":"16e045df-c42a-4f79-9bbc-7504250fdb81","Type":"ContainerStarted","Data":"8c490d3713387abec759db9323c1b3f58ad683fdc23dd28b7bb6366710906b4b"} Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.422050 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.446883 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7e8823a3-69b2-4e0d-9c22-61c698970b38-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zj4pq\" (UID: \"7e8823a3-69b2-4e0d-9c22-61c698970b38\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.464339 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5tf7\" (UniqueName: \"kubernetes.io/projected/97400b4d-3097-4875-a5ec-08f867212a0e-kube-api-access-b5tf7\") pod \"kube-storage-version-migrator-operator-b67b599dd-jm2jv\" (UID: \"97400b4d-3097-4875-a5ec-08f867212a0e\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.466162 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.474533 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k7vzj"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.475317 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.489433 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.490182 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ld2wn"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.490255 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.490688 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:22.990668842 +0000 UTC m=+146.436575225 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.503482 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.512239 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.517817 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbrrc\" (UniqueName: \"kubernetes.io/projected/994c73fc-b24b-4cef-8145-561cc1dca4c7-kube-api-access-hbrrc\") pod \"console-operator-58897d9998-dmkr9\" (UID: \"994c73fc-b24b-4cef-8145-561cc1dca4c7\") " pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.559636 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" Jan 28 15:19:22 crc kubenswrapper[4959]: W0128 15:19:22.571068 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97307cd7_d817_4da5_b2bc_adbb5f1406c8.slice/crio-f578b8e86428add42a19777a1cf1ff326cbd04ad3d346d64ee8e64f68723b970 WatchSource:0}: Error finding container f578b8e86428add42a19777a1cf1ff326cbd04ad3d346d64ee8e64f68723b970: Status 404 returned error can't find the container with id f578b8e86428add42a19777a1cf1ff326cbd04ad3d346d64ee8e64f68723b970 Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.578166 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.591313 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.591999 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.091963313 +0000 UTC m=+146.537869696 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.592274 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.593711 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.093702976 +0000 UTC m=+146.539609359 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.619850 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.648737 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.695214 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.696586 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.196556844 +0000 UTC m=+146.642463227 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.770211 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.806615 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.807375 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.307359889 +0000 UTC m=+146.753266272 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.813002 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hr5x"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.844989 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-886ph"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.860511 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-ctvhn"] Jan 28 15:19:22 crc kubenswrapper[4959]: I0128 15:19:22.908005 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:22 crc kubenswrapper[4959]: E0128 15:19:22.908543 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.408523317 +0000 UTC m=+146.854429700 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.009625 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.010727 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.51070626 +0000 UTC m=+146.956612643 (durationBeforeRetry 500ms). 
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.117909 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.118594 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.618571802 +0000 UTC m=+147.064478185 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.227786 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.228313 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.72829277 +0000 UTC m=+147.174199153 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.329968 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.330414 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.83037381 +0000 UTC m=+147.276280193 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.434621 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.435679 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:23.935663479 +0000 UTC m=+147.381569862 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.484671 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" event={"ID":"18c4c5d0-14c0-4ca4-ad3c-e521ab20ff61","Type":"ContainerStarted","Data":"a57096cb3eb740b19b5787046a3598aa98a6f48da379ee725a60fae36aa8771b"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.520981 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" event={"ID":"97307cd7-d817-4da5-b2bc-adbb5f1406c8","Type":"ContainerStarted","Data":"f578b8e86428add42a19777a1cf1ff326cbd04ad3d346d64ee8e64f68723b970"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.538475 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.539248 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.039210035 +0000 UTC m=+147.485116418 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.539631 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.539677 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" event={"ID":"d2d96b34-2e44-4d18-a591-2c286c762bf9","Type":"ContainerStarted","Data":"925b6174a99d0b8ce9d1631e5ec7b05f950c054859221ad0700280ab866a3c11"}
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.540032 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.040016106 +0000 UTC m=+147.485922489 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.560042 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" event={"ID":"d2b82dee-9207-4cad-ad4f-364e0c680d4c","Type":"ContainerStarted","Data":"68d3357f61355901ea1529a148eb4b09d6791884f63d6a485fe69469af30b6d8"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.563870 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-b7ncr"]
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.575134 4959 generic.go:334] "Generic (PLEG): container finished" podID="3ce471aa-c701-4901-b0ed-c66d86cd0059" containerID="e6cc61f36ab21aa1ba24b41a9cd8d9436e79e762b8befb8c266528b41ea7637b" exitCode=0
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.575317 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" event={"ID":"3ce471aa-c701-4901-b0ed-c66d86cd0059","Type":"ContainerDied","Data":"e6cc61f36ab21aa1ba24b41a9cd8d9436e79e762b8befb8c266528b41ea7637b"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.596401 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6"]
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.597293 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-9s7sv" event={"ID":"04f6a584-73bb-4e7d-9d8c-677b58c44944","Type":"ContainerStarted","Data":"89c81f2e057fcafd65e360104a603e0d5281c79cb1d2dbf7d15171d755da40c9"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.598641 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-fphck"]
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.609126 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" event={"ID":"6d9aabc2-ed95-420b-afad-7af29c415329","Type":"ContainerStarted","Data":"bda8eaf0f96bbda96e6aec38b40fb8745e5f2b7dbc2445f69c4158ee794b465c"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.640837 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.642337 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.142315071 +0000 UTC m=+147.588221454 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.644460 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" event={"ID":"06bff249-845d-4278-a2e5-a2a7c54c2f41","Type":"ContainerStarted","Data":"3b4cb9c0f8c8747cf8386628dca9b966ad0be163c57293802a1ba8fe6699f298"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.657041 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" event={"ID":"5013fa35-7674-41bd-83a4-d01ff22253e9","Type":"ContainerStarted","Data":"9f1d2230da8f6f18171dd7275f02026a93ff8147984022d6d502cd41df254ed0"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.662226 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r"]
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.674830 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plqj9"]
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.685343 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz"]
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.704599 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-886ph" event={"ID":"62ef9da5-17d7-4dab-9868-9e7c7694b799","Type":"ContainerStarted","Data":"3233c1b4e68cb49cfe5ad74131eba430e3d02473938eb8276ed02046961808ea"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.731695 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" podStartSLOduration=120.731652287 podStartE2EDuration="2m0.731652287s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:23.719766593 +0000 UTC m=+147.165672976" watchObservedRunningTime="2026-01-28 15:19:23.731652287 +0000 UTC m=+147.177558690"
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.737791 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" event={"ID":"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7","Type":"ContainerStarted","Data":"7dc244ecebf6f3fa789cefb450c713ccae24c8d588ed9c1397addcb520b03940"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.740279 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw"
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.746705 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.748624 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.248608735 +0000 UTC m=+147.694515118 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.789471 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" event={"ID":"b26918d7-cc7c-4925-8e2f-ba17e60177d5","Type":"ContainerStarted","Data":"a6b5840802769d372682633a14540d4749d1d458f7afd61ff1d8effff1c53802"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.825433 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-5xm4l" event={"ID":"0aedd299-b681-4f9f-b92e-d2bf27be7d06","Type":"ContainerStarted","Data":"c90449badff563d7dc317e030011b05908f338817f4d8250bcd4fb54dbf93a68"}
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.846334 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body=
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.846775 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused"
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.846567 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq"
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.847873 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.849365 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.349328441 +0000 UTC m=+147.795234954 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:23 crc kubenswrapper[4959]: I0128 15:19:23.951258 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:23 crc kubenswrapper[4959]: E0128 15:19:23.954972 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.454954378 +0000 UTC m=+147.900860761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.001231 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d" podStartSLOduration=121.00120776 podStartE2EDuration="2m1.00120776s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:24.000498913 +0000 UTC m=+147.446405306" watchObservedRunningTime="2026-01-28 15:19:24.00120776 +0000 UTC m=+147.447114143"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.001420 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-gqkxc" podStartSLOduration=121.001415485 podStartE2EDuration="2m1.001415485s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:23.952362044 +0000 UTC m=+147.398268437" watchObservedRunningTime="2026-01-28 15:19:24.001415485 +0000 UTC m=+147.447321868"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.015248 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-5xm4l"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.046929 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:24 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:24 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:24 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.047008 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.052682 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.053230 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.553206603 +0000 UTC m=+147.999112986 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.093958 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ljp65"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.111933 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.121623 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.155630 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.156411 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.656388341 +0000 UTC m=+148.102294724 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.226228 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.257402 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.257700 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.75765742 +0000 UTC m=+148.203563803 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.258002 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.258602 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.758592974 +0000 UTC m=+148.204499357 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: W0128 15:19:24.277558 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad1ad920_e620_4cd9_a1dd_3773290fbfc3.slice/crio-0095dcd8db8192b3bed4c908bf289c5419a30a99c664b4d8616b6f18a71ba4ed WatchSource:0}: Error finding container 0095dcd8db8192b3bed4c908bf289c5419a30a99c664b4d8616b6f18a71ba4ed: Status 404 returned error can't find the container with id 0095dcd8db8192b3bed4c908bf289c5419a30a99c664b4d8616b6f18a71ba4ed
Jan 28 15:19:24 crc kubenswrapper[4959]: W0128 15:19:24.308088 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0518e394_58c0_41d8_9f03_db75743ff4a8.slice/crio-56d51ef1299b975696fd82aaa7e58595d53f69eed4536646bab15e3362289737 WatchSource:0}: Error finding container 56d51ef1299b975696fd82aaa7e58595d53f69eed4536646bab15e3362289737: Status 404 returned error can't find the container with id 56d51ef1299b975696fd82aaa7e58595d53f69eed4536646bab15e3362289737
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.310944 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2sdcc" podStartSLOduration=121.310926425 podStartE2EDuration="2m1.310926425s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:24.308457714 +0000 UTC m=+147.754364097" watchObservedRunningTime="2026-01-28 15:19:24.310926425 +0000 UTC m=+147.756832808"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.321002 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.322223 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.345038 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5vzvg"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.346409 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.349846 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.351718 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hd5gj" podStartSLOduration=120.351702332 podStartE2EDuration="2m0.351702332s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:24.34878145 +0000 UTC m=+147.794687843" watchObservedRunningTime="2026-01-28 15:19:24.351702332 +0000 UTC m=+147.797608715"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.355506 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-977m5"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.359829 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.360184 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.8601563 +0000 UTC m=+148.306062683 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.360447 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.360862 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.860853958 +0000 UTC m=+148.306760341 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.410946 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-ddn7x"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.428432 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.443184 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" podStartSLOduration=120.44316219 podStartE2EDuration="2m0.44316219s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:24.441641022 +0000 UTC m=+147.887547405" watchObservedRunningTime="2026-01-28 15:19:24.44316219 +0000 UTC m=+147.889068573"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.446200 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.454385 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.463087 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.463920 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.96386333 +0000 UTC m=+148.409769713 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.465173 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.465797 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:24.965768188 +0000 UTC m=+148.411674571 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.514259 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-5xm4l" podStartSLOduration=120.514241784 podStartE2EDuration="2m0.514241784s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:24.513002244 +0000 UTC m=+147.958908647" watchObservedRunningTime="2026-01-28 15:19:24.514241784 +0000 UTC m=+147.960148167"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.567176 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.567423 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.067380086 +0000 UTC m=+148.513286509 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.669570 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.670204 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.170186024 +0000 UTC m=+148.616092407 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.771354 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.771539 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.271508605 +0000 UTC m=+148.717414988 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.771684 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.772044 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.272036288 +0000 UTC m=+148.717942661 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.781449 4959 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-bpvnw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.781532 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" podUID="3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.815281 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-dmkr9"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.821466 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zklvs"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.825460 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.830756 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj"]
Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.832475 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" event={"ID":"1cc89ccf-f7d4-4d82-abf9-088db33dbb05","Type":"ContainerStarted","Data":"bd2f96e371e9f363605af11c024853f6a7b5739289c4ce378eded195bfd721a8"}
event={"ID":"1cc89ccf-f7d4-4d82-abf9-088db33dbb05","Type":"ContainerStarted","Data":"bd2f96e371e9f363605af11c024853f6a7b5739289c4ce378eded195bfd721a8"} Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.833639 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-b7ncr" event={"ID":"0032b760-b9d9-4533-ae6c-dfe3e55d16e6","Type":"ContainerStarted","Data":"3159177cfe3f7be6135a83cc3bd67f789c63cf041e39687871a11fd52d484d8c"} Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.839217 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" event={"ID":"42901316-efd2-4e0f-9505-f8b0e5cc676c","Type":"ContainerStarted","Data":"efa133e7b48a1af2c0bcfd0bb2de2e2c1d4e14418d14a3bd80363c39a8c9edaa"} Jan 28 15:19:24 crc kubenswrapper[4959]: W0128 15:19:24.850162 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c2d3b29_3f92_4a16_9a53_0e98fb387802.slice/crio-aa93d72d2d75f3bc9da3ee78f5d2439d058433d62cfc1f5799711f66a6060c5d WatchSource:0}: Error finding container aa93d72d2d75f3bc9da3ee78f5d2439d058433d62cfc1f5799711f66a6060c5d: Status 404 returned error can't find the container with id aa93d72d2d75f3bc9da3ee78f5d2439d058433d62cfc1f5799711f66a6060c5d Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.851854 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" event={"ID":"3c4683ca-d036-458c-87ab-aaf398a5dbf5","Type":"ContainerStarted","Data":"a2bf0ee853d2a566e6ae060908591f528143736475c6a7380937e044ded97b16"} Jan 28 15:19:24 crc kubenswrapper[4959]: W0128 15:19:24.868366 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddfb911c8_efb4_4973_954c_808a8e87f0fe.slice/crio-fd7c8b24dc5628a59915dd9212dbfc1b919197ec71d88f680c51a2f2b4d108d9 WatchSource:0}: Error finding container fd7c8b24dc5628a59915dd9212dbfc1b919197ec71d88f680c51a2f2b4d108d9: Status 404 returned error can't find the container with id fd7c8b24dc5628a59915dd9212dbfc1b919197ec71d88f680c51a2f2b4d108d9 Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.872443 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.872794 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.372777865 +0000 UTC m=+148.818684238 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.891433 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-9s7sv" event={"ID":"04f6a584-73bb-4e7d-9d8c-677b58c44944","Type":"ContainerStarted","Data":"ef1e1c8ab128438fee8c42277edd4a1f63c5be99866164c22df83daf2ec68cce"} Jan 28 15:19:24 crc kubenswrapper[4959]: W0128 15:19:24.891886 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d14429e_3f90_4010_9b67_b0b6e81db122.slice/crio-a0382b2c7367c81e68e579cbe50daf6c93f28d771614488849022456b6a1735a WatchSource:0}: Error finding container a0382b2c7367c81e68e579cbe50daf6c93f28d771614488849022456b6a1735a: Status 404 returned error can't find the container with id a0382b2c7367c81e68e579cbe50daf6c93f28d771614488849022456b6a1735a Jan 28 15:19:24 crc kubenswrapper[4959]: W0128 15:19:24.915314 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83cc5140_ea3b_4939_a8c8_46566dea8c2d.slice/crio-ce88951f800c666c9dc71fd7ce820b2ce70528a72ca11ff52a0be00b78886bf3 WatchSource:0}: Error finding container ce88951f800c666c9dc71fd7ce820b2ce70528a72ca11ff52a0be00b78886bf3: Status 404 returned error can't find the container with id ce88951f800c666c9dc71fd7ce820b2ce70528a72ca11ff52a0be00b78886bf3 Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.923218 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" event={"ID":"0518e394-58c0-41d8-9f03-db75743ff4a8","Type":"ContainerStarted","Data":"56d51ef1299b975696fd82aaa7e58595d53f69eed4536646bab15e3362289737"} Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.930035 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" event={"ID":"2d4611de-0934-450c-a51e-67298e455900","Type":"ContainerStarted","Data":"df1f96ba7469dce2921fd54cb92f262c9043c1c49b4bac4fd412727232b17138"} Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.965247 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" event={"ID":"3df41cde-0e93-462b-8391-cbb0dcf6ea4a","Type":"ContainerStarted","Data":"4d365c851007ff0ca93e815424461c8ca9fe1cc24c65b04b407b4586e44028fe"} Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.971553 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" event={"ID":"b26918d7-cc7c-4925-8e2f-ba17e60177d5","Type":"ContainerStarted","Data":"bfc77712f2ffcdefd72c678ac1618849844d220af3b6caa576b632eb57afcdca"} Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.975016 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:24 crc kubenswrapper[4959]: E0128 15:19:24.975538 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.475520281 +0000 UTC m=+148.921426664 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:24 crc kubenswrapper[4959]: I0128 15:19:24.981352 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" event={"ID":"ce02fde1-7de1-4456-9fa4-6591a4b18b9c","Type":"ContainerStarted","Data":"51ba6bb17e60a248ba616d81376835da1a9b898c8d1b84884b2dc25c8fe8715f"} Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.025953 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" event={"ID":"06bff249-845d-4278-a2e5-a2a7c54c2f41","Type":"ContainerStarted","Data":"af24165060dbe0e98fe1f381d333546ba7f4675cd28dc47f80cb5289034ba401"} Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.029551 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.030085 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 15:19:25 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld Jan 28 15:19:25 crc kubenswrapper[4959]: [+]process-running ok Jan 28 15:19:25 crc kubenswrapper[4959]: healthz check failed Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.030803 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.030738 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" event={"ID":"8b131611-c7ac-4409-9f6f-6f63309e4c55","Type":"ContainerStarted","Data":"54d755a8daae5bc1e06d31a5a204fe21bbf52a6c8a871cd35be49331bee6bed0"} Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.044610 4959 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2hr5x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.044678 4959 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" podUID="06bff249-845d-4278-a2e5-a2a7c54c2f41" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.046095 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" event={"ID":"3a0e736f-1d0f-4c59-ab45-b057dda052aa","Type":"ContainerStarted","Data":"47ec97b9feae4b44444da076b24ac8ca48fa7cd31efada6300ab4edbe81a2b0a"} Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.056248 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" event={"ID":"ad1ad920-e620-4cd9-a1dd-3773290fbfc3","Type":"ContainerStarted","Data":"0095dcd8db8192b3bed4c908bf289c5419a30a99c664b4d8616b6f18a71ba4ed"} Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.064990 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" event={"ID":"97307cd7-d817-4da5-b2bc-adbb5f1406c8","Type":"ContainerStarted","Data":"c5bfcf0ce39eb493e96a3afb34e558065e9c8081fbd1979c501f16caec33bf0a"} Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.084001 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.084511 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.584492241 +0000 UTC m=+149.030398624 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.087288 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.185632 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.187514 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-28 15:19:25.687498833 +0000 UTC m=+149.133405216 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.284304 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" podStartSLOduration=121.284277872 podStartE2EDuration="2m1.284277872s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:25.281791702 +0000 UTC m=+148.727698095" watchObservedRunningTime="2026-01-28 15:19:25.284277872 +0000 UTC m=+148.730184255" Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.290322 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.290548 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.790515977 +0000 UTC m=+149.236422360 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.290690 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.291199 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.791179423 +0000 UTC m=+149.237085806 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.392474 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.392654 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.892625937 +0000 UTC m=+149.338532320 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.393319 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.393786 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.893778396 +0000 UTC m=+149.339684779 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.494278 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.494546 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.994510553 +0000 UTC m=+149.440416936 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.494917 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.495317 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:25.995309142 +0000 UTC m=+149.441215525 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.596166 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.596334 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.096298725 +0000 UTC m=+149.542205098 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.596639 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.597057 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.097041873 +0000 UTC m=+149.542948256 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.698942 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.699171 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.199134904 +0000 UTC m=+149.645041287 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.699876 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.700316 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.200300783 +0000 UTC m=+149.646207166 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.808012 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.808436 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.308416651 +0000 UTC m=+149.754323034 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:25 crc kubenswrapper[4959]: I0128 15:19:25.909291 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:25 crc kubenswrapper[4959]: E0128 15:19:25.909948 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.409924157 +0000 UTC m=+149.855830720 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.011439 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.011629 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.511594787 +0000 UTC m=+149.957501170 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.011883 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.021537 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.521507801 +0000 UTC m=+149.967414204 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.030098 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 15:19:26 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld Jan 28 15:19:26 crc kubenswrapper[4959]: [+]process-running ok Jan 28 15:19:26 crc kubenswrapper[4959]: healthz check failed Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.030179 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.113648 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.113681 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-977m5" event={"ID":"7cef5bdc-f62b-4872-bc40-59765c1faa27","Type":"ContainerStarted","Data":"ae7d650b10b483877ece1acf6150b8c168e747e369829eb9f20bdd90483a17dc"} Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.129822 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.629773564 +0000 UTC m=+150.075679947 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.129941 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-977m5" event={"ID":"7cef5bdc-f62b-4872-bc40-59765c1faa27","Type":"ContainerStarted","Data":"333b2c1e25e93bcb18d158fc850e737d3f6368f08ac1558f4a9a0f1833e50fef"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.130297 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.130819 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.630795029 +0000 UTC m=+150.076701412 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.165400 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" event={"ID":"3ce471aa-c701-4901-b0ed-c66d86cd0059","Type":"ContainerStarted","Data":"b885e61ae00fe8bd431a459e66e3092acd3806f34f1fd01ee13a9d362cb3a884"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.227832 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-977m5" podStartSLOduration=7.227804364 podStartE2EDuration="7.227804364s" podCreationTimestamp="2026-01-28 15:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.165967658 +0000 UTC m=+149.611874051" watchObservedRunningTime="2026-01-28 15:19:26.227804364 +0000 UTC m=+149.673710747" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.230403 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" podStartSLOduration=122.230393398 podStartE2EDuration="2m2.230393398s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.191670782 +0000 UTC m=+149.637577175" watchObservedRunningTime="2026-01-28 15:19:26.230393398 +0000 UTC m=+149.676299771" Jan 28 15:19:26 crc 
kubenswrapper[4959]: I0128 15:19:26.233543 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.234978 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.73495814 +0000 UTC m=+150.180864523 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.239770 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" event={"ID":"3a0e736f-1d0f-4c59-ab45-b057dda052aa","Type":"ContainerStarted","Data":"751135847f1126e12a4a274ff297bcc95a44c2ee3644d4f94d92347be0a41934"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.261826 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" event={"ID":"42901316-efd2-4e0f-9505-f8b0e5cc676c","Type":"ContainerStarted","Data":"c22f1af4a61e313f41a0101b8f45749a73d2fd8038bbf08c38047b8e2dfe1e4a"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.266179 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" event={"ID":"cd4a69ec-a69e-45fa-8105-d50da9b41212","Type":"ContainerStarted","Data":"b63e51eb41a6a2c4752d9d772d769ccb4329666b16ca91fa33e038994344ef60"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.268415 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" podStartSLOduration=123.268401056 podStartE2EDuration="2m3.268401056s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.26652214 +0000 UTC m=+149.712428543" watchObservedRunningTime="2026-01-28 15:19:26.268401056 +0000 UTC m=+149.714307439" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.281537 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" event={"ID":"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7","Type":"ContainerStarted","Data":"899ea906c11d0858e2c2bfcc396338e6e4b8e8d83884280fa9345d05c59cac4b"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.281599 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" 
event={"ID":"f12f8c01-cc1c-4a38-b234-e18bb9ff00d7","Type":"ContainerStarted","Data":"66d3394aebaf40ce05f977ed02e0c8378a28bc55ed65d41f8e1cea2b759c472d"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.283059 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.287491 4959 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6slvt container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" start-of-body= Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.287562 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" podUID="f12f8c01-cc1c-4a38-b234-e18bb9ff00d7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.294252 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gxh2r" podStartSLOduration=122.294098791 podStartE2EDuration="2m2.294098791s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.289843776 +0000 UTC m=+149.735750169" watchObservedRunningTime="2026-01-28 15:19:26.294098791 +0000 UTC m=+149.740005174" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.294707 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" event={"ID":"3df41cde-0e93-462b-8391-cbb0dcf6ea4a","Type":"ContainerStarted","Data":"153530a5f3cb6bf0264091a81c3498a9b79ec8e356b78d7322ea7c52b16894d0"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.319597 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" event={"ID":"0673f696-f58d-4980-8858-3a9aa97eb9dc","Type":"ContainerStarted","Data":"9a9826b299b1adb6ff94eb8adeefe093c3498e399e6b127e6180b85ced6d6830"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.319663 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" event={"ID":"0673f696-f58d-4980-8858-3a9aa97eb9dc","Type":"ContainerStarted","Data":"a8f53d7049befc94206e641abdcfe42d0dd34efde1e534c2c64ce1426da6e926"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.323638 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" event={"ID":"a215a092-653c-4a83-9901-e5094b2c0f12","Type":"ContainerStarted","Data":"7c29213cebf4eac67cbdb8df5484eaa62028242ba01cabc3d4d2f7e1ea4198dc"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.326148 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" event={"ID":"ce02fde1-7de1-4456-9fa4-6591a4b18b9c","Type":"ContainerStarted","Data":"c0cc5f696711043afeb6705c0a6a7cf51607b9281275486048a426e62e99a9e7"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.326506 4959 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" podStartSLOduration=122.326480261 podStartE2EDuration="2m2.326480261s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.323283751 +0000 UTC m=+149.769190144" watchObservedRunningTime="2026-01-28 15:19:26.326480261 +0000 UTC m=+149.772386644" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.338753 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.339479 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.839460811 +0000 UTC m=+150.285367374 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.365054 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" event={"ID":"2d4611de-0934-450c-a51e-67298e455900","Type":"ContainerStarted","Data":"ae4343e6208f796196fabbce1d4dc7387a3ed9b393247c0c2d168565e75ec662"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.365768 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.372942 4959 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plqj9 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.373004 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.386092 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" event={"ID":"d2d96b34-2e44-4d18-a591-2c286c762bf9","Type":"ContainerStarted","Data":"ef2f85d4760e3ca9e19e1965537a8fcb336262915362512610b1a22945136d74"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.406635 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-w4dqm" podStartSLOduration=123.406617578 podStartE2EDuration="2m3.406617578s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.362381156 +0000 UTC m=+149.808287529" watchObservedRunningTime="2026-01-28 15:19:26.406617578 +0000 UTC m=+149.852523961" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.407845 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" podStartSLOduration=123.407837959 podStartE2EDuration="2m3.407837959s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.404855205 +0000 UTC m=+149.850761608" watchObservedRunningTime="2026-01-28 15:19:26.407837959 +0000 UTC m=+149.853744342" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.409677 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" event={"ID":"ad1ad920-e620-4cd9-a1dd-3773290fbfc3","Type":"ContainerStarted","Data":"75444e9e859945368d632ba2381129e3138a869369e9f4a89bceaf9f49644a90"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.434404 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" event={"ID":"1cc89ccf-f7d4-4d82-abf9-088db33dbb05","Type":"ContainerStarted","Data":"f1ee1c406e7bbc01ada1fd26474c653ce5998ad10f34ff207cbadf0b6474e5c8"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.438574 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" podStartSLOduration=122.438551907 podStartE2EDuration="2m2.438551907s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.436289691 +0000 UTC m=+149.882196094" watchObservedRunningTime="2026-01-28 15:19:26.438551907 +0000 UTC m=+149.884458280" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.440407 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" event={"ID":"dfb911c8-efb4-4973-954c-808a8e87f0fe","Type":"ContainerStarted","Data":"e65d2a14e912b83613e54a1158c50b757448ad923c5453cd649b0b30b32e10e7"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.440561 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" event={"ID":"dfb911c8-efb4-4973-954c-808a8e87f0fe","Type":"ContainerStarted","Data":"fd7c8b24dc5628a59915dd9212dbfc1b919197ec71d88f680c51a2f2b4d108d9"} Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.440583 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.940569066 +0000 UTC m=+150.386475449 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.440521 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.443886 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.445077 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:26.945063287 +0000 UTC m=+150.390969670 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.463453 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" event={"ID":"5013fa35-7674-41bd-83a4-d01ff22253e9","Type":"ContainerStarted","Data":"9215426ece95f225896e65caf6b2a7618836876e36d417226a2411904ac23a6c"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.471709 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-884qg" podStartSLOduration=122.471683394 podStartE2EDuration="2m2.471683394s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.471376247 +0000 UTC m=+149.917282650" watchObservedRunningTime="2026-01-28 15:19:26.471683394 +0000 UTC m=+149.917589777" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.504391 4959 csr.go:261] certificate signing request csr-nz898 is approved, waiting to be issued Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.504440 4959 csr.go:257] certificate signing request csr-nz898 is issued Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.505122 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-b7ncr" 
event={"ID":"0032b760-b9d9-4533-ae6c-dfe3e55d16e6","Type":"ContainerStarted","Data":"1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.506448 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ljp65" podStartSLOduration=122.506430872 podStartE2EDuration="2m2.506430872s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.504920895 +0000 UTC m=+149.950827298" watchObservedRunningTime="2026-01-28 15:19:26.506430872 +0000 UTC m=+149.952337255" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.515940 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" event={"ID":"7e8823a3-69b2-4e0d-9c22-61c698970b38","Type":"ContainerStarted","Data":"95c95e63b867cc512fd9ac10ab34443107b6ddd014b87213a32d9250915c034b"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.529556 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-886ph" event={"ID":"62ef9da5-17d7-4dab-9868-9e7c7694b799","Type":"ContainerStarted","Data":"3a99654838718ee990ca773fc975fb6e381288207aece568014157e19c52dd2c"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.530653 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-dmkr9" event={"ID":"994c73fc-b24b-4cef-8145-561cc1dca4c7","Type":"ContainerStarted","Data":"3446c70c737f5e23587e3554652d5ca12a20e9faabebe14980a15076a324a4ef"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.530681 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-dmkr9" event={"ID":"994c73fc-b24b-4cef-8145-561cc1dca4c7","Type":"ContainerStarted","Data":"eec7add71968827d1c094775387301b5d6c415b199f1c1c7288d96818b6d9e38"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.532463 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-dmkr9" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.532866 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" event={"ID":"97400b4d-3097-4875-a5ec-08f867212a0e","Type":"ContainerStarted","Data":"8868aeb417e2be93ddf6fe019aeed3edc4a103fdc355a1527b4682db25d80104"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.532914 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" event={"ID":"97400b4d-3097-4875-a5ec-08f867212a0e","Type":"ContainerStarted","Data":"53747695115d23473ebd283372875c158103785b85bc446632572554f84a3beb"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.538176 4959 patch_prober.go:28] interesting pod/console-operator-58897d9998-dmkr9 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.538238 4959 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console-operator/console-operator-58897d9998-dmkr9" podUID="994c73fc-b24b-4cef-8145-561cc1dca4c7" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.540442 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" event={"ID":"0518e394-58c0-41d8-9f03-db75743ff4a8","Type":"ContainerStarted","Data":"f029e629c9af1d5300678787b62118f34783b7a180f50a12e4f5a1ee4d7e788e"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.544630 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.546359 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.04480242 +0000 UTC m=+150.490708803 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.547054 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" event={"ID":"8b131611-c7ac-4409-9f6f-6f63309e4c55","Type":"ContainerStarted","Data":"c8dc202fc5d1e93b5402b42a5a3258f3a2b9b94d5b167d93a74ee8105eef0ebf"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.547657 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.552074 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.052059089 +0000 UTC m=+150.497965472 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.550613 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.553487 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-b7ncr" podStartSLOduration=123.553473094 podStartE2EDuration="2m3.553473094s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.552053109 +0000 UTC m=+149.997959492" watchObservedRunningTime="2026-01-28 15:19:26.553473094 +0000 UTC m=+149.999379477" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.560336 4959 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-hkcxz container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.560400 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" podUID="8b131611-c7ac-4409-9f6f-6f63309e4c55" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.565214 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" event={"ID":"6c2d3b29-3f92-4a16-9a53-0e98fb387802","Type":"ContainerStarted","Data":"aa93d72d2d75f3bc9da3ee78f5d2439d058433d62cfc1f5799711f66a6060c5d"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.584196 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-jm2jv" podStartSLOduration=122.584178012 podStartE2EDuration="2m2.584178012s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.581210879 +0000 UTC m=+150.027117262" watchObservedRunningTime="2026-01-28 15:19:26.584178012 +0000 UTC m=+150.030084395" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.624929 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" podStartSLOduration=122.624904788 podStartE2EDuration="2m2.624904788s" 
podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.624066237 +0000 UTC m=+150.069972640" watchObservedRunningTime="2026-01-28 15:19:26.624904788 +0000 UTC m=+150.070811171" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.687946 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" event={"ID":"6d14429e-3f90-4010-9b67-b0b6e81db122","Type":"ContainerStarted","Data":"a0382b2c7367c81e68e579cbe50daf6c93f28d771614488849022456b6a1735a"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.687998 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" event={"ID":"3c4683ca-d036-458c-87ab-aaf398a5dbf5","Type":"ContainerStarted","Data":"ec9199488ec28a3dfcfc65efd9f064b83cf33b32c3aef7b0e45059b5416c9fc3"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.688011 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" event={"ID":"d5aae7df-27bd-4553-8367-c4b6c65906ec","Type":"ContainerStarted","Data":"ec5af442a963091f9ecc9ee503198e81659772ecac506ffc922fb732bf6468ec"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.690677 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.692312 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-dmkr9" podStartSLOduration=123.692178498 podStartE2EDuration="2m3.692178498s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.68984155 +0000 UTC m=+150.135747933" watchObservedRunningTime="2026-01-28 15:19:26.692178498 +0000 UTC m=+150.138084891" Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.693160 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" event={"ID":"83cc5140-ea3b-4939-a8c8-46566dea8c2d","Type":"ContainerStarted","Data":"92d1326afdcd57c9f2277d207f437abc79bb7479a77b9b540cec80f62ca9d3f1"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.693213 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" event={"ID":"83cc5140-ea3b-4939-a8c8-46566dea8c2d","Type":"ContainerStarted","Data":"ce88951f800c666c9dc71fd7ce820b2ce70528a72ca11ff52a0be00b78886bf3"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.699028 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" event={"ID":"3c910a54-aff5-4519-9a44-d17cc6001208","Type":"ContainerStarted","Data":"f3cd2a1791e8a8262b26df8f9cf931f9067281e95a8f7a951408a10557085147"} Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.699082 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.704508 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.706188 4959 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2hr5x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.706262 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" podUID="06bff249-845d-4278-a2e5-a2a7c54c2f41" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.706673 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.192090346 +0000 UTC m=+150.637996729 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.711598 4959 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-4qzbj container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body=
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.711693 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" podUID="3c910a54-aff5-4519-9a44-d17cc6001208" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.749775 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.750225 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.752244 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" podStartSLOduration=122.752220311 podStartE2EDuration="2m2.752220311s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.752076417 +0000 UTC m=+150.197982820" watchObservedRunningTime="2026-01-28 15:19:26.752220311 +0000 UTC m=+150.198126694"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.755374 4959 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-chhqr container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.755441 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr" podUID="3ce471aa-c701-4901-b0ed-c66d86cd0059" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.796973 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.805292 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.30527383 +0000 UTC m=+150.751180213 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.823648 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-9s7sv" podStartSLOduration=7.823626023 podStartE2EDuration="7.823626023s" podCreationTimestamp="2026-01-28 15:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.822215598 +0000 UTC m=+150.268121981" watchObservedRunningTime="2026-01-28 15:19:26.823626023 +0000 UTC m=+150.269532406"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.850459 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-kbz6d"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.898813 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:26 crc kubenswrapper[4959]: E0128 15:19:26.900319 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.400299866 +0000 UTC m=+150.846206249 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.948530 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" podStartSLOduration=122.948509896 podStartE2EDuration="2m2.948509896s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.947148592 +0000 UTC m=+150.393054995" watchObservedRunningTime="2026-01-28 15:19:26.948509896 +0000 UTC m=+150.394416279"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.949869 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-qzhgw" podStartSLOduration=122.949862759 podStartE2EDuration="2m2.949862759s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.911861101 +0000 UTC m=+150.357767494" watchObservedRunningTime="2026-01-28 15:19:26.949862759 +0000 UTC m=+150.395769142"
Jan 28 15:19:26 crc kubenswrapper[4959]: I0128 15:19:26.984546 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" podStartSLOduration=123.984520935 podStartE2EDuration="2m3.984520935s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:26.983556171 +0000 UTC m=+150.429462554" watchObservedRunningTime="2026-01-28 15:19:26.984520935 +0000 UTC m=+150.430427318"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.000941 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.001582 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.501560886 +0000 UTC m=+150.947467269 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
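[Editor's note] The nestedpendingoperations errors above all follow one pattern: a failed mount or unmount arms a per-volume backoff, and the next attempt is refused until the window elapses (here the initial durationBeforeRetry of 500ms; the kubelet grows the delay on repeated failures). A simplified sketch of such a gate, with illustrative names rather than the kubelet's actual types:

package main

import (
	"fmt"
	"time"
)

// retryGate refuses new attempts for an operation until the backoff
// window since the last recorded failure has elapsed.
type retryGate struct {
	lastError time.Time
	backoff   time.Duration
}

func (g *retryGate) tryStart(now time.Time) error {
	if next := g.lastError.Add(g.backoff); now.Before(next) {
		return fmt.Errorf("no retries permitted until %s (durationBeforeRetry %s)", next, g.backoff)
	}
	return nil
}

func (g *retryGate) recordError(now time.Time) {
	g.lastError = now
	if g.backoff == 0 {
		g.backoff = 500 * time.Millisecond // initial delay, as seen in the log
	}
}

func main() {
	g := &retryGate{}
	now := time.Now()
	g.recordError(now)
	fmt.Println(g.tryStart(now.Add(100 * time.Millisecond))) // refused: still inside the 500ms window
	fmt.Println(g.tryStart(now.Add(600 * time.Millisecond))) // nil: window has elapsed
}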
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.005946 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" podStartSLOduration=123.005918323 podStartE2EDuration="2m3.005918323s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:27.00173585 +0000 UTC m=+150.447642233" watchObservedRunningTime="2026-01-28 15:19:27.005918323 +0000 UTC m=+150.451824706"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.018162 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:27 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:27 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:27 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.018241 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.102789 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.103003 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.602963458 +0000 UTC m=+151.048869841 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.103394 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.103743 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.603733148 +0000 UTC m=+151.049639531 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.204057 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.204347 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.704306941 +0000 UTC m=+151.150213324 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.204438 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.204805 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.704795892 +0000 UTC m=+151.150702275 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.306016 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.306258 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.806214646 +0000 UTC m=+151.252121039 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.306631 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.306986 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.806973005 +0000 UTC m=+151.252879388 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.407693 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.408608 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:27.908587933 +0000 UTC m=+151.354494316 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.506664 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-28 15:14:26 +0000 UTC, rotation deadline is 2026-12-22 12:21:20.60319449 +0000 UTC
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.506714 4959 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7869h1m53.096484065s for next certificate rotation
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.509984 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.510350 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.010338624 +0000 UTC m=+151.456244997 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.611786 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.611936 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.111904242 +0000 UTC m=+151.557810625 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.612293 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.612687 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.112677851 +0000 UTC m=+151.558584234 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.708774 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-886ph" event={"ID":"62ef9da5-17d7-4dab-9868-9e7c7694b799","Type":"ContainerStarted","Data":"9ae05884feeff53914c000732e9e5bb8a22d1e348b77afe1037409501c6eae1c"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.709387 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-886ph"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.711289 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" event={"ID":"d2b82dee-9207-4cad-ad4f-364e0c680d4c","Type":"ContainerStarted","Data":"f29d76306bcf8dc825af4f2b379db6031cd1ae9984ce830059dd056670a56af4"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.712880 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.713094 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.213058969 +0000 UTC m=+151.658965352 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
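[Editor's note] The certificate_manager pair above picks a rotation deadline ahead of the certificate's expiry and then sleeps until that deadline; the quoted wait is simply deadline minus the current time. A sketch of the subtraction with the values from the log (illustrative only, not kubelet source):

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	deadline, _ := time.Parse(layout, "2026-12-22 12:21:20.60319449 +0000 UTC")
	// Approximate "now" taken from the log line's own timestamp.
	now, _ := time.Parse(layout, "2026-01-28 15:19:27.506709 +0000 UTC")
	// Prints roughly 7869h1m53.09..., matching "Waiting 7869h1m53.096484065s".
	fmt.Printf("Waiting %s for next certificate rotation\n", deadline.Sub(now))
}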
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.713224 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.713246 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" event={"ID":"3c4683ca-d036-458c-87ab-aaf398a5dbf5","Type":"ContainerStarted","Data":"8c913d2c6a27305ddbf760421303862246a14b9a0ef994cb7878218f1556cdf2"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.713461 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6"
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.713659 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.213644484 +0000 UTC m=+151.659550867 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.716716 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" event={"ID":"1cc89ccf-f7d4-4d82-abf9-088db33dbb05","Type":"ContainerStarted","Data":"e01f5ce659bf2adb5ec9fee518ee8179fe04bd831e947c316e25629812bdaf2b"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.718493 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" event={"ID":"a215a092-653c-4a83-9901-e5094b2c0f12","Type":"ContainerStarted","Data":"faee9156b3adc615ffbc85bc06dbba32878b7b32f99a1413d5dc2b25ba554b53"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.718563 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" event={"ID":"a215a092-653c-4a83-9901-e5094b2c0f12","Type":"ContainerStarted","Data":"83958d900fac40cbbb2aa45366a1710ed375c89f7461c40d360cc8e100408ab2"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.720085 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" event={"ID":"0518e394-58c0-41d8-9f03-db75743ff4a8","Type":"ContainerStarted","Data":"b5407b0e88b7ad0026b9e7f4f6cf4de354331bf81a94beef38b8a4665f4bb994"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.721558 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zj4pq" event={"ID":"7e8823a3-69b2-4e0d-9c22-61c698970b38","Type":"ContainerStarted","Data":"0c388e3c9750e51736a886153d3f14621eba702a3e85c76c3d917281ad08cc3c"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.723574 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" event={"ID":"6d14429e-3f90-4010-9b67-b0b6e81db122","Type":"ContainerStarted","Data":"f9cfe06d1fc138ee493d29f1f2bfc505c72bd985408a37ca38d3464ca5550955"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.725256 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" event={"ID":"dfb911c8-efb4-4973-954c-808a8e87f0fe","Type":"ContainerStarted","Data":"c93d441d27c8f5518a61eab797269ee79f491d47cda12e9f339ee99eece4cd6d"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.726681 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" event={"ID":"b26918d7-cc7c-4925-8e2f-ba17e60177d5","Type":"ContainerStarted","Data":"f62426346391de5640cbca1b0ff6db8bee2445cba80b4003b869b60019a49100"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.728607 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zklvs" event={"ID":"d5aae7df-27bd-4553-8367-c4b6c65906ec","Type":"ContainerStarted","Data":"c2dee7a45557bf6f637765b8f9028ffb402cfbf3ecccc3193ff3980147b50804"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.729696 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" event={"ID":"cd4a69ec-a69e-45fa-8105-d50da9b41212","Type":"ContainerStarted","Data":"f1cfaa32641369def2a71a933cea98e3598af260a6ac95df9fee1f044c1f6eba"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.731029 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" event={"ID":"97307cd7-d817-4da5-b2bc-adbb5f1406c8","Type":"ContainerStarted","Data":"a6aac48519e2700e58dc5347a4324d28778bb67219afedf477c03a0667e400c0"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.732076 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5vzvg" event={"ID":"6c2d3b29-3f92-4a16-9a53-0e98fb387802","Type":"ContainerStarted","Data":"93134a5f35887fa01e98cf555fa0ad314b2bd075d5420defad99f6bba6d586c9"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.733726 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" event={"ID":"5013fa35-7674-41bd-83a4-d01ff22253e9","Type":"ContainerStarted","Data":"7aa3fe7b3d8ba06556939f18daf936cdf8b4d6fed85d7f2950c5a11e3752534b"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.736417 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" event={"ID":"ce02fde1-7de1-4456-9fa4-6591a4b18b9c","Type":"ContainerStarted","Data":"418f61cd8e927e951edcdd252eb93f42061b233c255034a3efb257e1c67c8204"}
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.739970 4959 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6slvt container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused" start-of-body=
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.740030 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" podUID="f12f8c01-cc1c-4a38-b234-e18bb9ff00d7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": dial tcp 10.217.0.35:5443: connect: connection refused"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.740790 4959 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plqj9 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body=
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.740830 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.741339 4959 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-hkcxz container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.741406 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz" podUID="8b131611-c7ac-4409-9f6f-6f63309e4c55" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.23:8443/healthz\": dial tcp 10.217.0.23:8443: connect: connection refused"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.741427 4959 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-4qzbj container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body=
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.741481 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" podUID="3c910a54-aff5-4519-9a44-d17cc6001208" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.39:8443/healthz\": dial tcp 10.217.0.39:8443: connect: connection refused"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.743070 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-886ph" podStartSLOduration=8.74305746 podStartE2EDuration="8.74305746s" podCreationTimestamp="2026-01-28 15:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:27.740612349 +0000 UTC m=+151.186518732" watchObservedRunningTime="2026-01-28 15:19:27.74305746 +0000 UTC m=+151.188963843"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.745384 4959 patch_prober.go:28] interesting pod/console-operator-58897d9998-dmkr9 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body=
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.745414 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-dmkr9" podUID="994c73fc-b24b-4cef-8145-561cc1dca4c7" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.770013 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" podStartSLOduration=123.769982744 podStartE2EDuration="2m3.769982744s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:27.765893633 +0000 UTC m=+151.211800016" watchObservedRunningTime="2026-01-28 15:19:27.769982744 +0000 UTC m=+151.215889137"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.813906 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.814088 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.314060302 +0000 UTC m=+151.759966695 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.815074 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.824393 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-nkcwd" podStartSLOduration=123.824368317 podStartE2EDuration="2m3.824368317s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:27.783505318 +0000 UTC m=+151.229411701" watchObservedRunningTime="2026-01-28 15:19:27.824368317 +0000 UTC m=+151.270274700"
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.833016 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.33299058 +0000 UTC m=+151.778897143 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.880916 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fv8nc" podStartSLOduration=123.880893502 podStartE2EDuration="2m3.880893502s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:27.829543164 +0000 UTC m=+151.275449547" watchObservedRunningTime="2026-01-28 15:19:27.880893502 +0000 UTC m=+151.326799885"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.883237 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-k7vzj" podStartSLOduration=123.88322789 podStartE2EDuration="2m3.88322789s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:27.882645635 +0000 UTC m=+151.328552038" watchObservedRunningTime="2026-01-28 15:19:27.88322789 +0000 UTC m=+151.329134273"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.918394 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmmbc" podStartSLOduration=123.918376268 podStartE2EDuration="2m3.918376268s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:27.915708432 +0000 UTC m=+151.361614815" watchObservedRunningTime="2026-01-28 15:19:27.918376268 +0000 UTC m=+151.364282651"
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.919637 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:27 crc kubenswrapper[4959]: E0128 15:19:27.920100 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.42008618 +0000 UTC m=+151.865992563 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
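[Editor's note] The readiness/startup probe failures in this window are all the same shape: the kubelet issues an HTTP GET against a health endpoint on the pod IP, and the connection is refused because the freshly started container is not listening yet. A minimal sketch of such a check; this is an illustration, not the kubelet's prober, though like the kubelet it treats 2xx-3xx as success and skips TLS verification on HTTPS probes:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func probe(url string) string {
	client := &http.Client{
		Timeout: time.Second,
		// HTTPS probes are made without verifying the serving certificate.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(url)
	if err != nil {
		// Yields messages like the log's "connect: connection refused".
		return fmt.Sprintf("failure: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return "success"
	}
	return fmt.Sprintf("failure: HTTP probe failed with statuscode: %d", resp.StatusCode)
}

func main() {
	// Endpoint taken from the catalog-operator probe entries above.
	fmt.Println(probe("https://10.217.0.23:8443/healthz"))
}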
Jan 28 15:19:27 crc kubenswrapper[4959]: I0128 15:19:27.970900 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-ld2wn" podStartSLOduration=123.970877273 podStartE2EDuration="2m3.970877273s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:27.969303774 +0000 UTC m=+151.415210177" watchObservedRunningTime="2026-01-28 15:19:27.970877273 +0000 UTC m=+151.416783656"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.019400 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:28 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:28 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:28 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.019501 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.020504 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-ddn7x" podStartSLOduration=124.020478558 podStartE2EDuration="2m4.020478558s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:28.004253627 +0000 UTC m=+151.450160010" watchObservedRunningTime="2026-01-28 15:19:28.020478558 +0000 UTC m=+151.466384941"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.021267 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.021832 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.521817951 +0000 UTC m=+151.967724334 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.049533 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-nf28j" podStartSLOduration=124.049509104 podStartE2EDuration="2m4.049509104s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:28.046250764 +0000 UTC m=+151.492157157" watchObservedRunningTime="2026-01-28 15:19:28.049509104 +0000 UTC m=+151.495415487"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.080792 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9f8vj" podStartSLOduration=124.080769216 podStartE2EDuration="2m4.080769216s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:28.078018278 +0000 UTC m=+151.523924671" watchObservedRunningTime="2026-01-28 15:19:28.080769216 +0000 UTC m=+151.526675599"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.116491 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-fphck" podStartSLOduration=124.116467657 podStartE2EDuration="2m4.116467657s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:28.112745386 +0000 UTC m=+151.558651779" watchObservedRunningTime="2026-01-28 15:19:28.116467657 +0000 UTC m=+151.562374040"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.122745 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.131780 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.631716974 +0000 UTC m=+152.077623357 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.132539 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.132978 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.632963594 +0000 UTC m=+152.078869977 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.154250 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-z8frl" podStartSLOduration=125.154226989 podStartE2EDuration="2m5.154226989s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:28.151529522 +0000 UTC m=+151.597435905" watchObservedRunningTime="2026-01-28 15:19:28.154226989 +0000 UTC m=+151.600133372"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.233321 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.233722 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.733686051 +0000 UTC m=+152.179592434 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.233863 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.234208 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.734193173 +0000 UTC m=+152.180099556 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.335185 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.335425 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.835391292 +0000 UTC m=+152.281297675 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.335812 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.336233 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.836224042 +0000 UTC m=+152.282130415 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.437591 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.437858 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.93781143 +0000 UTC m=+152.383717813 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.438489 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.438539 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.438583 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.438679 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.438742 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.439247 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:28.939213295 +0000 UTC m=+152.385119858 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.450456 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.450887 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.452753 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.453866 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.539836 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.539997 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.039968632 +0000 UTC m=+152.485875015 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.540048 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.540449 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.040439693 +0000 UTC m=+152.486346076 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.609711 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.642086 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.642305 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.142267897 +0000 UTC m=+152.588174280 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.642419 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.642839 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.1428301 +0000 UTC m=+152.588736473 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.690815 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.690905 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.712751 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.719016 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.744078 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.744311 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.244269905 +0000 UTC m=+152.690176288 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.744514 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.744909 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.24488875 +0000 UTC m=+152.690795143 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.757056 4959 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plqj9 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.757623 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.29:8080/healthz\": dial tcp 10.217.0.29:8080: connect: connection refused" Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.847493 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.848942 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.348921168 +0000 UTC m=+152.794827551 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.850250 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.850980 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.350971509 +0000 UTC m=+152.796877892 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:28 crc kubenswrapper[4959]: I0128 15:19:28.954594 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:28 crc kubenswrapper[4959]: E0128 15:19:28.955028 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.455008267 +0000 UTC m=+152.900914650 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.045473 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 15:19:29 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld Jan 28 15:19:29 crc kubenswrapper[4959]: [+]process-running ok Jan 28 15:19:29 crc kubenswrapper[4959]: healthz check failed Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.046033 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.060328 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.060707 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.560692196 +0000 UTC m=+153.006598579 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.162704 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.163195 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.663175776 +0000 UTC m=+153.109082149 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.267122 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.267513 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.76749616 +0000 UTC m=+153.213402543 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.368203 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.368488 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.868447692 +0000 UTC m=+153.314354075 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.368782 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.369278 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.869261373 +0000 UTC m=+153.315167766 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.470433 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.470685 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.970642305 +0000 UTC m=+153.416548698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.471115 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.471501 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:29.971482736 +0000 UTC m=+153.417389119 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.572646 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.572870 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.072833768 +0000 UTC m=+153.518740141 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.573514 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.574131 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.074096849 +0000 UTC m=+153.520003232 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.675447 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.675937 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.175872472 +0000 UTC m=+153.621778855 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.676099 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.676942 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.176921738 +0000 UTC m=+153.622828121 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.738301 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zxfjz"] Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.739574 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.756914 4959 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6slvt container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.756982 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt" podUID="f12f8c01-cc1c-4a38-b234-e18bb9ff00d7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.35:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.765671 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.785960 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.285904348 +0000 UTC m=+153.731810741 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.785964 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.786335 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-catalog-content\") pod \"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.786395 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.786431 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksm4s\" (UniqueName: \"kubernetes.io/projected/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-kube-api-access-ksm4s\") pod 
\"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.786560 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-utilities\") pod \"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.786985 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.286963374 +0000 UTC m=+153.732869757 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.798422 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zxfjz"] Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.801323 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f549372cfab010b7ad16d2d207ea1226c4dd1b326a2e452c01a7073e83e68566"} Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.812616 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"03d7a032d44ac09a6b6270827a9cbb10b60e02d613e4bcaaf79fa709f2045143"} Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.838598 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"0f2903e9a0c1c9d70139d9ff85cf3c8b5f31c7c663976cdb52ebacc59edf89a9"} Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.888393 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.888619 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-catalog-content\") pod \"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.888666 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksm4s\" (UniqueName: 
\"kubernetes.io/projected/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-kube-api-access-ksm4s\") pod \"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.888712 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-utilities\") pod \"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.889210 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-utilities\") pod \"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.889285 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.389266109 +0000 UTC m=+153.835172492 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.889481 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-catalog-content\") pod \"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.962401 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksm4s\" (UniqueName: \"kubernetes.io/projected/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-kube-api-access-ksm4s\") pod \"certified-operators-zxfjz\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:29 crc kubenswrapper[4959]: I0128 15:19:29.990679 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:29 crc kubenswrapper[4959]: E0128 15:19:29.991481 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.491440661 +0000 UTC m=+153.937347044 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.021173 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 15:19:30 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld Jan 28 15:19:30 crc kubenswrapper[4959]: [+]process-running ok Jan 28 15:19:30 crc kubenswrapper[4959]: healthz check failed Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.021263 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.075497 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.092840 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.093207 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.593188793 +0000 UTC m=+154.039095176 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.134146 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r74c4"] Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.135219 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.180365 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r74c4"] Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.193947 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.194570 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6549\" (UniqueName: \"kubernetes.io/projected/13b2e969-f501-4266-bcf2-76514bf739c2-kube-api-access-j6549\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.194642 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-utilities\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.194678 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-catalog-content\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.194999 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.694987476 +0000 UTC m=+154.140893859 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.290830 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dz2cj"] Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.291918 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dz2cj" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.297404 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.297767 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.298092 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-utilities\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.298171 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-catalog-content\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.298222 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6549\" (UniqueName: \"kubernetes.io/projected/13b2e969-f501-4266-bcf2-76514bf739c2-kube-api-access-j6549\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.298712 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.798692186 +0000 UTC m=+154.244598569 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.298955 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-utilities\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.299010 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-catalog-content\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.336972 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6549\" (UniqueName: \"kubernetes.io/projected/13b2e969-f501-4266-bcf2-76514bf739c2-kube-api-access-j6549\") pod \"certified-operators-r74c4\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") " pod="openshift-marketplace/certified-operators-r74c4"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.399083 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-utilities\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.399156 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.399217 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr95z\" (UniqueName: \"kubernetes.io/projected/288654ae-ff9c-4ab8-999a-29ca0266da2a-kube-api-access-tr95z\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.399263 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-catalog-content\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.399631 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:30.899617198 +0000 UTC m=+154.345523581 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.416218 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dz2cj"]
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.468491 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r74c4"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.501312 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.501533 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr95z\" (UniqueName: \"kubernetes.io/projected/288654ae-ff9c-4ab8-999a-29ca0266da2a-kube-api-access-tr95z\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.501579 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-catalog-content\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.501599 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-utilities\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.502690 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-utilities\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.502779 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.002760544 +0000 UTC m=+154.448666927 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.503340 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-catalog-content\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.517950 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z8lcg"]
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.519444 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.588188 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z8lcg"]
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.603806 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.603872 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7s8j\" (UniqueName: \"kubernetes.io/projected/b181a50d-3075-479c-b460-bd2addc3e6b3-kube-api-access-t7s8j\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.603916 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-utilities\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.603959 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-catalog-content\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.604316 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.10430332 +0000 UTC m=+154.550209703 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.647190 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tr95z\" (UniqueName: \"kubernetes.io/projected/288654ae-ff9c-4ab8-999a-29ca0266da2a-kube-api-access-tr95z\") pod \"community-operators-dz2cj\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.750843 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.751240 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-catalog-content\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.751377 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7s8j\" (UniqueName: \"kubernetes.io/projected/b181a50d-3075-479c-b460-bd2addc3e6b3-kube-api-access-t7s8j\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.751430 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-utilities\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.751931 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-utilities\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.752044 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.252022797 +0000 UTC m=+154.697929180 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.752288 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-catalog-content\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.815763 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7s8j\" (UniqueName: \"kubernetes.io/projected/b181a50d-3075-479c-b460-bd2addc3e6b3-kube-api-access-t7s8j\") pod \"community-operators-z8lcg\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") " pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.858547 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.858926 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.358909575 +0000 UTC m=+154.804815958 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.884776 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.893344 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"7334f7cd7abf2576fe84b6b40275a58904298c518dee00892b2e3ccc91119b12"}
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.913750 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.915746 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"3d81c7905f409981f386bbf9f1d1877b177a6fc9e5eb5e654d1af28f2fc579fe"}
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.916647 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.934298 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.935071 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.939525 4959 generic.go:334] "Generic (PLEG): container finished" podID="3a0e736f-1d0f-4c59-ab45-b057dda052aa" containerID="751135847f1126e12a4a274ff297bcc95a44c2ee3644d4f94d92347be0a41934" exitCode=0
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.939604 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" event={"ID":"3a0e736f-1d0f-4c59-ab45-b057dda052aa","Type":"ContainerDied","Data":"751135847f1126e12a4a274ff297bcc95a44c2ee3644d4f94d92347be0a41934"}
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.961158 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:30 crc kubenswrapper[4959]: E0128 15:19:30.961626 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.46159939 +0000 UTC m=+154.907505773 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.974762 4959 patch_prober.go:28] interesting pod/apiserver-76f77b778f-n5pdh container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]log ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]etcd ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/max-in-flight-filter ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Jan 28 15:19:30 crc kubenswrapper[4959]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/project.openshift.io-projectcache ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/openshift.io-startinformers ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/openshift.io-restmapperupdater ok
Jan 28 15:19:30 crc kubenswrapper[4959]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 28 15:19:30 crc kubenswrapper[4959]: livez check failed
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.974842 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh" podUID="3df41cde-0e93-462b-8391-cbb0dcf6ea4a" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.975771 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e482202dc0540ff69b07eccd83e3d4455922845cce88fa14efc8c7884c7ff900"}
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.983005 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 15:19:30 crc kubenswrapper[4959]: I0128 15:19:30.988825 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.039387 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:31 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:31 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:31 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.039832 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.040740 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.041005 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.056064 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.071021 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.072638 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.572621711 +0000 UTC m=+155.018528094 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.087361 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body=
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.087374 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body=
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.087443 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.087486 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.137562 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zxfjz"]
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.172086 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.172393 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.172475 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.172666 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.67264259 +0000 UTC m=+155.118548973 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.276119 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.276210 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.276243 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.276383 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.277050 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.777034867 +0000 UTC m=+155.222941250 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.384729 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.385593 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:31.885575886 +0000 UTC m=+155.331482269 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.403908 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.472935 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.474521 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.479027 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.482510 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.490892 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.503047 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.503495 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.003481777 +0000 UTC m=+155.449388160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.605246 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.605448 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.605480 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.605583 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.105564287 +0000 UTC m=+155.551470670 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.654862 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.705960 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.706007 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.706066 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.706443 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.206427016 +0000 UTC m=+155.652333389 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.706954 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.775762 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.806822 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.807166 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.307147193 +0000 UTC m=+155.753053576 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.809738 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.855363 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-chhqr"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.861905 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.867722 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r74c4"]
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.909643 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:31 crc kubenswrapper[4959]: E0128 15:19:31.910630 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.410611767 +0000 UTC m=+155.856518150 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.911039 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dkw87"]
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.934499 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:31 crc kubenswrapper[4959]: I0128 15:19:31.939365 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.001010 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dkw87"]
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.014605 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-5xm4l"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.025084 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.025307 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-utilities\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.025387 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpzst\" (UniqueName: \"kubernetes.io/projected/9058c198-cfe2-496a-b045-d3650a0a36bf-kube-api-access-bpzst\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.025434 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-catalog-content\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.026500 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.526473527 +0000 UTC m=+155.972379910 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.045284 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:32 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:32 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:32 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.045359 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.045625 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r74c4" event={"ID":"13b2e969-f501-4266-bcf2-76514bf739c2","Type":"ContainerStarted","Data":"b0f89077218dd11a009f55de013d06d828073b7217832db143e71e320ac405e3"}
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.083673 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxfjz" event={"ID":"5b46d2a5-2d15-4841-97a6-b3768e4df1d4","Type":"ContainerStarted","Data":"c468a9e1175f73700c086b5bba5b7203182f881ec67e93e82628bb848b6f804f"}
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.083728 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxfjz" event={"ID":"5b46d2a5-2d15-4841-97a6-b3768e4df1d4","Type":"ContainerStarted","Data":"e0e9f087269f8d57fae3d4f1eac95aa9a307ebc0ce0bbd41633c240965c9b215"}
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.089272 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" event={"ID":"d2b82dee-9207-4cad-ad4f-364e0c680d4c","Type":"ContainerStarted","Data":"bf2cc352fbb333777dc1eaf1f920076a9024e255f8a7260d941e63d5b916f846"}
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.126967 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpzst\" (UniqueName: \"kubernetes.io/projected/9058c198-cfe2-496a-b045-d3650a0a36bf-kube-api-access-bpzst\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.127067 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-catalog-content\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.127339 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-utilities\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.127392 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.129668 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-catalog-content\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.132904 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-utilities\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.133263 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.633246743 +0000 UTC m=+156.079153126 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.159433 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dz2cj"]
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.164958 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.168557 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpzst\" (UniqueName: \"kubernetes.io/projected/9058c198-cfe2-496a-b045-d3650a0a36bf-kube-api-access-bpzst\") pod \"redhat-marketplace-dkw87\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.198429 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-b7ncr"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.198916 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-b7ncr"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.203136 4959 patch_prober.go:28] interesting pod/console-f9d7485db-b7ncr container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body=
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.203198 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-b7ncr" podUID="0032b760-b9d9-4533-ae6c-dfe3e55d16e6" containerName="console" probeResult="failure" output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.207033 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z8lcg"]
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.233494 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.238919 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.73888953 +0000 UTC m=+156.184795913 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.246325 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.263609 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hkcxz"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.319143 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.343566 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.343903 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.843890762 +0000 UTC m=+156.289797135 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.357307 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6slvt"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.366740 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vd8kz"]
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.367950 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vd8kz"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.386391 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vd8kz"]
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.446998 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.447563 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmgg9\" (UniqueName: \"kubernetes.io/projected/ef01ad95-eb28-48c8-9d58-3ea696164442-kube-api-access-xmgg9\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.447681 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-utilities\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.447885 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-catalog-content\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz"
Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.448095 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:32.948069224 +0000 UTC m=+156.393975607 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.608307 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-catalog-content\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.610374 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmgg9\" (UniqueName: \"kubernetes.io/projected/ef01ad95-eb28-48c8-9d58-3ea696164442-kube-api-access-xmgg9\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.610455 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-utilities\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.610576 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.613881 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-utilities\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.616949 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-catalog-content\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.656206 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4qzbj" Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.697257 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmgg9\" (UniqueName: \"kubernetes.io/projected/ef01ad95-eb28-48c8-9d58-3ea696164442-kube-api-access-xmgg9\") pod \"redhat-marketplace-vd8kz\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.711942 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.712338 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.212319558 +0000 UTC m=+156.658225941 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.718856 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.798797 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-dmkr9"
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.799765 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.813850 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.816849 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.316835137 +0000 UTC m=+156.762741520 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.917890 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.924258 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.424237738 +0000 UTC m=+156.870144121 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.924328 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:32 crc kubenswrapper[4959]: E0128 15:19:32.924810 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.424802262 +0000 UTC m=+156.870708645 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:32 crc kubenswrapper[4959]: I0128 15:19:32.963609 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.021295 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:33 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:33 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:33 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.021363 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.025196 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.025737 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.525721983 +0000 UTC m=+156.971628366 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.085481 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.129821 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvp97\" (UniqueName: \"kubernetes.io/projected/3a0e736f-1d0f-4c59-ab45-b057dda052aa-kube-api-access-jvp97\") pod \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.129934 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0e736f-1d0f-4c59-ab45-b057dda052aa-config-volume\") pod \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.130154 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0e736f-1d0f-4c59-ab45-b057dda052aa-secret-volume\") pod \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\" (UID: \"3a0e736f-1d0f-4c59-ab45-b057dda052aa\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.130362 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.130800 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.630785666 +0000 UTC m=+157.076692049 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.132523 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a0e736f-1d0f-4c59-ab45-b057dda052aa-config-volume" (OuterVolumeSpecName: "config-volume") pod "3a0e736f-1d0f-4c59-ab45-b057dda052aa" (UID: "3a0e736f-1d0f-4c59-ab45-b057dda052aa"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.133408 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dz2cj" event={"ID":"288654ae-ff9c-4ab8-999a-29ca0266da2a","Type":"ContainerStarted","Data":"1c90a8fb708d2fdc7dd9b1b949e059f55483775fcd620aacc06e4a5709829697"}
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.143346 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8" event={"ID":"3a0e736f-1d0f-4c59-ab45-b057dda052aa","Type":"ContainerDied","Data":"47ec97b9feae4b44444da076b24ac8ca48fa7cd31efada6300ab4edbe81a2b0a"}
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.143406 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47ec97b9feae4b44444da076b24ac8ca48fa7cd31efada6300ab4edbe81a2b0a"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.143494 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.149358 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8lcg" event={"ID":"b181a50d-3075-479c-b460-bd2addc3e6b3","Type":"ContainerStarted","Data":"78ea1d727582bde5d400a23d7e9c57cc7ee35bace2db576d6237e7867d574e36"}
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.149763 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a0e736f-1d0f-4c59-ab45-b057dda052aa-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3a0e736f-1d0f-4c59-ab45-b057dda052aa" (UID: "3a0e736f-1d0f-4c59-ab45-b057dda052aa"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.155438 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a0e736f-1d0f-4c59-ab45-b057dda052aa-kube-api-access-jvp97" (OuterVolumeSpecName: "kube-api-access-jvp97") pod "3a0e736f-1d0f-4c59-ab45-b057dda052aa" (UID: "3a0e736f-1d0f-4c59-ab45-b057dda052aa"). InnerVolumeSpecName "kube-api-access-jvp97". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.167247 4959 generic.go:334] "Generic (PLEG): container finished" podID="13b2e969-f501-4266-bcf2-76514bf739c2" containerID="8280103119d2cc8c2cf0e53e17a0d414f68c4cfffaf4db25abe3f8efd81035a5" exitCode=0
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.167434 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r74c4" event={"ID":"13b2e969-f501-4266-bcf2-76514bf739c2","Type":"ContainerDied","Data":"8280103119d2cc8c2cf0e53e17a0d414f68c4cfffaf4db25abe3f8efd81035a5"}
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.172761 4959 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.174592 4959 generic.go:334] "Generic (PLEG): container finished" podID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerID="c468a9e1175f73700c086b5bba5b7203182f881ec67e93e82628bb848b6f804f" exitCode=0
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.175565 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxfjz" event={"ID":"5b46d2a5-2d15-4841-97a6-b3768e4df1d4","Type":"ContainerDied","Data":"c468a9e1175f73700c086b5bba5b7203182f881ec67e93e82628bb848b6f804f"}
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.209870 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4c59b895-c9c4-4775-9502-d8eedd19e0b9","Type":"ContainerStarted","Data":"3e326586e7c31f807ea155ca9f04c7f156f903c3ca107c0a844d65f3426a4c61"}
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.235500 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.236022 4959 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a0e736f-1d0f-4c59-ab45-b057dda052aa-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.236038 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvp97\" (UniqueName: \"kubernetes.io/projected/3a0e736f-1d0f-4c59-ab45-b057dda052aa-kube-api-access-jvp97\") on node \"crc\" DevicePath \"\""
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.236048 4959 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a0e736f-1d0f-4c59-ab45-b057dda052aa-config-volume\") on node \"crc\" DevicePath \"\""
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.236153 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.736134668 +0000 UTC m=+157.182041051 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.246366 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" event={"ID":"d2b82dee-9207-4cad-ad4f-364e0c680d4c","Type":"ContainerStarted","Data":"ef2f725451e7be19d74d791e02e83ec880dfaa708ba57f8e2bc23f363145058c"}
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.304884 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2tkvn"]
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.305203 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a0e736f-1d0f-4c59-ab45-b057dda052aa" containerName="collect-profiles"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.305230 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a0e736f-1d0f-4c59-ab45-b057dda052aa" containerName="collect-profiles"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.305348 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a0e736f-1d0f-4c59-ab45-b057dda052aa" containerName="collect-profiles"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.313221 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.317913 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2tkvn"]
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.328945 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.337800 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.339713 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.839688394 +0000 UTC m=+157.285594777 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
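The "Generic (PLEG): container finished" and "SyncLoop (PLEG): event for pod" records above come from the kubelet's pod lifecycle event generator, which periodically relists container state from the runtime and turns differences between relists into ContainerStarted/ContainerDied events for the sync loop. A toy Go sketch of that relist-and-diff idea, with hypothetical types (containerState, event) and none of the real PLEG's caching or error handling:

```go
package main

import "fmt"

type containerState map[string]string // container ID -> "running" / "exited"

type event struct {
	ID, Type string // Type is "ContainerStarted" or "ContainerDied"
}

// diff compares two relists and emits lifecycle events, the core idea
// behind the PLEG lines in the log above.
func diff(prev, cur containerState) []event {
	var evs []event
	for id, st := range cur {
		old, seen := prev[id]
		switch {
		case !seen && st == "running":
			evs = append(evs, event{id, "ContainerStarted"})
		case seen && old == "running" && st == "exited":
			evs = append(evs, event{id, "ContainerDied"})
		}
	}
	return evs
}

func main() {
	prev := containerState{"8280103119d2": "running"}
	cur := containerState{"8280103119d2": "exited", "1c90a8fb708d": "running"}
	for _, e := range diff(prev, cur) {
		fmt.Printf("%s: %s\n", e.Type, e.ID)
	}
}
```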
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.439687 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.439838 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.939811435 +0000 UTC m=+157.385717828 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.439946 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhlh2\" (UniqueName: \"kubernetes.io/projected/575b26ae-87aa-469e-9bd9-1b4384d80093-kube-api-access-jhlh2\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.440007 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.440468 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:33.940455161 +0000 UTC m=+157.386361544 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.440737 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-catalog-content\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.440813 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-utilities\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.452693 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dkw87"]
Jan 28 15:19:33 crc kubenswrapper[4959]: W0128 15:19:33.508361 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9058c198_cfe2_496a_b045_d3650a0a36bf.slice/crio-b8ac479db03476586c8b1bcbb0f5c09c7fc430256e0f6feed4e5a52f77cd333c WatchSource:0}: Error finding container b8ac479db03476586c8b1bcbb0f5c09c7fc430256e0f6feed4e5a52f77cd333c: Status 404 returned error can't find the container with id b8ac479db03476586c8b1bcbb0f5c09c7fc430256e0f6feed4e5a52f77cd333c
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.543594 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.543881 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:34.043804012 +0000 UTC m=+157.489710405 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.544751 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-catalog-content\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.544785 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-utilities\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.544995 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhlh2\" (UniqueName: \"kubernetes.io/projected/575b26ae-87aa-469e-9bd9-1b4384d80093-kube-api-access-jhlh2\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.545474 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-catalog-content\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.545724 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-utilities\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.546301 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.546725 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:34.046707585 +0000 UTC m=+157.492613968 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.559550 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vd8kz"]
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.568791 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhlh2\" (UniqueName: \"kubernetes.io/projected/575b26ae-87aa-469e-9bd9-1b4384d80093-kube-api-access-jhlh2\") pod \"redhat-operators-2tkvn\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.578847 4959 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 28 15:19:33 crc kubenswrapper[4959]: W0128 15:19:33.581528 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef01ad95_eb28_48c8_9d58_3ea696164442.slice/crio-002afc0f535aceb8ba69648e5c883754f51d706aaae0a8a282e6413f7d4aed6a WatchSource:0}: Error finding container 002afc0f535aceb8ba69648e5c883754f51d706aaae0a8a282e6413f7d4aed6a: Status 404 returned error can't find the container with id 002afc0f535aceb8ba69648e5c883754f51d706aaae0a8a282e6413f7d4aed6a
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.647886 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.648139 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:34.148071746 +0000 UTC m=+157.593978129 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.648390 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.648893 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:34.148883657 +0000 UTC m=+157.594790040 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.654995 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.689715 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8ckgq"]
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.691021 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.727747 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8ckgq"]
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.752348 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.752513 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 15:19:34.252490664 +0000 UTC m=+157.698397047 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.752669 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:33 crc kubenswrapper[4959]: E0128 15:19:33.753028 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 15:19:34.253017757 +0000 UTC m=+157.698924140 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-l7lfr" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.779637 4959 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-28T15:19:33.578881969Z","Handler":null,"Name":""}
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.786922 4959 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.786957 4959 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.854489 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.855217 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-catalog-content\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.855304 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-utilities\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.855367 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cknrm\" (UniqueName: \"kubernetes.io/projected/48ead489-a40c-4d3c-a18f-83287043b523-kube-api-access-cknrm\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.868250 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.956425 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-catalog-content\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.956508 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-utilities\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.956545 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.956563 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cknrm\" (UniqueName: \"kubernetes.io/projected/48ead489-a40c-4d3c-a18f-83287043b523-kube-api-access-cknrm\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.957276 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-catalog-content\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.957633 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-utilities\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.977497 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cknrm\" (UniqueName: \"kubernetes.io/projected/48ead489-a40c-4d3c-a18f-83287043b523-kube-api-access-cknrm\") pod \"redhat-operators-8ckgq\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") " pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.979696 4959 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 28 15:19:33 crc kubenswrapper[4959]: I0128 15:19:33.979753 4959 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.009163 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-l7lfr\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.011190 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2tkvn"]
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.030329 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:34 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:34 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:34 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.030375 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.103508 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.111514 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-886ph"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.214025 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8ckgq"
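The csi_attacher.go record above shows why MountDevice "succeeded" without doing any work: MountDevice corresponds to the CSI NodeStageVolume RPC and is only issued when the driver advertises the STAGE_UNSTAGE_VOLUME capability in its NodeGetCapabilities response. This hostpath driver does not, so the kubelet skips staging and proceeds straight to the per-pod NodePublishVolume (the "MountVolume.SetUp succeeded" record). A sketch of that capability check against the driver's node socket, using the CSI spec's Go bindings; the endpoint is taken from the registration lines above and error handling is kept minimal:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Endpoint taken from the csi_plugin.go registration lines in the log.
	conn, err := grpc.Dial("unix:///var/lib/kubelet/plugins/csi-hostpath/csi.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := csi.NewNodeClient(conn).NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})
	if err != nil {
		log.Fatal(err)
	}

	staging := false
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			staging = true
		}
	}
	// Mirrors the decision logged by csi_attacher.go: no staging capability
	// means NodeStageVolume is skipped and only NodePublishVolume runs.
	fmt.Println("STAGE_UNSTAGE_VOLUME advertised:", staging)
}
```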
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.295999 4959 generic.go:334] "Generic (PLEG): container finished" podID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerID="e37fc01c4af49c00ea64ad6159d0847776cc09f67cde277c376238c2f561087b" exitCode=0
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.296082 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dkw87" event={"ID":"9058c198-cfe2-496a-b045-d3650a0a36bf","Type":"ContainerDied","Data":"e37fc01c4af49c00ea64ad6159d0847776cc09f67cde277c376238c2f561087b"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.296144 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dkw87" event={"ID":"9058c198-cfe2-496a-b045-d3650a0a36bf","Type":"ContainerStarted","Data":"b8ac479db03476586c8b1bcbb0f5c09c7fc430256e0f6feed4e5a52f77cd333c"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.302066 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe","Type":"ContainerStarted","Data":"23b120dd45423ffd4887634999ff311b554f6a2158e7bf36b821768270161c75"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.302178 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe","Type":"ContainerStarted","Data":"a62e70ca0168f17d989bff54672941fb0a0e39ac5e2fef51469f2f146a8c6013"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.307776 4959 generic.go:334] "Generic (PLEG): container finished" podID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerID="039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806" exitCode=0
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.307918 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8lcg" event={"ID":"b181a50d-3075-479c-b460-bd2addc3e6b3","Type":"ContainerDied","Data":"039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.318639 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4c59b895-c9c4-4775-9502-d8eedd19e0b9","Type":"ContainerStarted","Data":"93bde34458171b14c5a71f24758c351f786042a0ad8d9163bcd051f7aec79561"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.332788 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2tkvn" event={"ID":"575b26ae-87aa-469e-9bd9-1b4384d80093","Type":"ContainerStarted","Data":"c02fd0ba9c3bcfcab6a875bfd7d343855fbcba1583b1be7e974841baae64479a"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.355025 4959 generic.go:334] "Generic (PLEG): container finished" podID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerID="aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37" exitCode=0
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.355191 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vd8kz" event={"ID":"ef01ad95-eb28-48c8-9d58-3ea696164442","Type":"ContainerDied","Data":"aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.355232 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vd8kz" event={"ID":"ef01ad95-eb28-48c8-9d58-3ea696164442","Type":"ContainerStarted","Data":"002afc0f535aceb8ba69648e5c883754f51d706aaae0a8a282e6413f7d4aed6a"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.368594 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.368572422 podStartE2EDuration="3.368572422s" podCreationTimestamp="2026-01-28 15:19:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:34.367315471 +0000 UTC m=+157.813221874" watchObservedRunningTime="2026-01-28 15:19:34.368572422 +0000 UTC m=+157.814478795"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.390519 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" event={"ID":"d2b82dee-9207-4cad-ad4f-364e0c680d4c","Type":"ContainerStarted","Data":"c59e3b1877a2f2a8acab2be424c35dde25376d807330fc89f680d1464e2cc341"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.402174 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=4.402093929 podStartE2EDuration="4.402093929s" podCreationTimestamp="2026-01-28 15:19:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:34.400718455 +0000 UTC m=+157.846624838" watchObservedRunningTime="2026-01-28 15:19:34.402093929 +0000 UTC m=+157.848000312"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.416202 4959 generic.go:334] "Generic (PLEG): container finished" podID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerID="77ec4c0d4974ab232b0674713c70daba5437632953fa4303458902fb504a71ad" exitCode=0
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.416280 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dz2cj" event={"ID":"288654ae-ff9c-4ab8-999a-29ca0266da2a","Type":"ContainerDied","Data":"77ec4c0d4974ab232b0674713c70daba5437632953fa4303458902fb504a71ad"}
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.585669 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-ctvhn" podStartSLOduration=15.585640801 podStartE2EDuration="15.585640801s" podCreationTimestamp="2026-01-28 15:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:34.561964426 +0000 UTC m=+158.007870829" watchObservedRunningTime="2026-01-28 15:19:34.585640801 +0000 UTC m=+158.031547184"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.602271 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.638100 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-l7lfr"]
Jan 28 15:19:34 crc kubenswrapper[4959]: I0128 15:19:34.717775 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8ckgq"]
Jan 28 15:19:34 crc kubenswrapper[4959]: W0128 15:19:34.764075 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48ead489_a40c_4d3c_a18f_83287043b523.slice/crio-ce83d40c49189155a45b985bd04589535d94203aabf2464e7f8741537632784d WatchSource:0}: Error finding container ce83d40c49189155a45b985bd04589535d94203aabf2464e7f8741537632784d: Status 404 returned error can't find the container with id ce83d40c49189155a45b985bd04589535d94203aabf2464e7f8741537632784d
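The pod_startup_latency_tracker records above report the kubelet's startup SLO metric: podStartSLOduration measures from podCreationTimestamp to the watch-observed running time, with image-pull time excluded when pulls occurred; here firstStartedPulling/lastFinishedPulling are zero timestamps, so nothing is subtracted and it equals podStartE2EDuration. A quick Go check of the arithmetic for revision-pruner-9-crc, assuming only the timestamps printed in the log:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Go's default time.String() shape, as printed in the log lines.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

	created, _ := time.Parse(layout, "2026-01-28 15:19:31 +0000 UTC")
	observed, _ := time.Parse(layout, "2026-01-28 15:19:34.368572422 +0000 UTC")

	// No image pulls were observed (zero firstStartedPulling/lastFinishedPulling),
	// so nothing is subtracted and SLO duration equals E2E duration.
	fmt.Println(observed.Sub(created).Seconds()) // 3.368572422, matching podStartSLOduration
}
```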
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.018236 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:35 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:35 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:35 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.018718 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.431928 4959 generic.go:334] "Generic (PLEG): container finished" podID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerID="79aef054e2ac683eec971f74c06a371db5e7237fdac49fc2a8df0d2f5ebdd358" exitCode=0
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.432024 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2tkvn" event={"ID":"575b26ae-87aa-469e-9bd9-1b4384d80093","Type":"ContainerDied","Data":"79aef054e2ac683eec971f74c06a371db5e7237fdac49fc2a8df0d2f5ebdd358"}
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.457925 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe","Type":"ContainerDied","Data":"23b120dd45423ffd4887634999ff311b554f6a2158e7bf36b821768270161c75"}
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.458073 4959 generic.go:334] "Generic (PLEG): container finished" podID="9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe" containerID="23b120dd45423ffd4887634999ff311b554f6a2158e7bf36b821768270161c75" exitCode=0
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.479801 4959 generic.go:334] "Generic (PLEG): container finished" podID="48ead489-a40c-4d3c-a18f-83287043b523" containerID="4b8e2a29a96c5ae77fe918e0dbd3536272582715ee2de5526b685ee57a1cfa63" exitCode=0
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.479941 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ckgq" event={"ID":"48ead489-a40c-4d3c-a18f-83287043b523","Type":"ContainerDied","Data":"4b8e2a29a96c5ae77fe918e0dbd3536272582715ee2de5526b685ee57a1cfa63"}
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.480013 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ckgq" event={"ID":"48ead489-a40c-4d3c-a18f-83287043b523","Type":"ContainerStarted","Data":"ce83d40c49189155a45b985bd04589535d94203aabf2464e7f8741537632784d"}
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.483643 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" event={"ID":"c29792dd-faa1-4f0c-b405-e0de581ee26f","Type":"ContainerStarted","Data":"e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1"}
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.483704 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" event={"ID":"c29792dd-faa1-4f0c-b405-e0de581ee26f","Type":"ContainerStarted","Data":"dd115c1dbb0fc44419a060935abf8be3bef37c45ad2f4cf33659375cde8956ea"}
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.484362 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr"
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.489831 4959 generic.go:334] "Generic (PLEG): container finished" podID="4c59b895-c9c4-4775-9502-d8eedd19e0b9" containerID="93bde34458171b14c5a71f24758c351f786042a0ad8d9163bcd051f7aec79561" exitCode=0
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.491741 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4c59b895-c9c4-4775-9502-d8eedd19e0b9","Type":"ContainerDied","Data":"93bde34458171b14c5a71f24758c351f786042a0ad8d9163bcd051f7aec79561"}
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.569416 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" podStartSLOduration=131.569393554 podStartE2EDuration="2m11.569393554s" podCreationTimestamp="2026-01-28 15:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:19:35.565737885 +0000 UTC m=+159.011644288" watchObservedRunningTime="2026-01-28 15:19:35.569393554 +0000 UTC m=+159.015299947"
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.939850 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh"
Jan 28 15:19:35 crc kubenswrapper[4959]: I0128 15:19:35.944794 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-n5pdh"
Jan 28 15:19:36 crc kubenswrapper[4959]: I0128 15:19:36.023717 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:36 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:36 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:36 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:36 crc kubenswrapper[4959]: I0128 15:19:36.023802 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:36 crc kubenswrapper[4959]: I0128 15:19:36.957245 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
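The recurring router-default probe failures above have the shape of a Kubernetes healthz-style aggregated endpoint: each subcheck is reported as [+]name ok or [-]name failed, and any failing check turns the whole response into an HTTP 500, which the kubelet's HTTP prober counts as a startup-probe failure and keeps retrying until backend-http and has-synced pass. A self-contained Go sketch of such an aggregator handler; the check names mirror the log, while the port and failure reasons are invented for illustration:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

type check struct {
	name string
	fn   func() error
}

// healthz renders the aggregated [+]/[-] body seen in the probe output and
// returns 500 if any subcheck fails, which an HTTP prober treats as failure.
func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		body, failed := "", false
		for _, c := range checks {
			if err := c.fn(); err != nil {
				failed = true
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
			} else {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			}
		}
		if failed {
			w.WriteHeader(http.StatusInternalServerError)
			body += "healthz check failed\n"
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	checks := []check{
		{"backend-http", func() error { return fmt.Errorf("not ready") }}, // failing, as in the log
		{"has-synced", func() error { return fmt.Errorf("not ready") }},   // failing, as in the log
		{"process-running", func() error { return nil }},                  // passing, as in the log
	}
	log.Fatal(http.ListenAndServe(":8080", healthz(checks))) // port is illustrative
}
```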
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.022466 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:37 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:37 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:37 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.022558 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.058678 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kubelet-dir\") pod \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\" (UID: \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\") "
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.058813 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kube-api-access\") pod \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\" (UID: \"4c59b895-c9c4-4775-9502-d8eedd19e0b9\") "
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.064655 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4c59b895-c9c4-4775-9502-d8eedd19e0b9" (UID: "4c59b895-c9c4-4775-9502-d8eedd19e0b9"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.083517 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4c59b895-c9c4-4775-9502-d8eedd19e0b9" (UID: "4c59b895-c9c4-4775-9502-d8eedd19e0b9"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.138420 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.166005 4959 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.166049 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4c59b895-c9c4-4775-9502-d8eedd19e0b9-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.267773 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kubelet-dir\") pod \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\" (UID: \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\") "
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.267846 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kube-api-access\") pod \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\" (UID: \"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe\") "
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.267920 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe" (UID: "9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.268352 4959 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.280331 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe" (UID: "9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.370188 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.588873 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4c59b895-c9c4-4775-9502-d8eedd19e0b9","Type":"ContainerDied","Data":"3e326586e7c31f807ea155ca9f04c7f156f903c3ca107c0a844d65f3426a4c61"}
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.588959 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e326586e7c31f807ea155ca9f04c7f156f903c3ca107c0a844d65f3426a4c61"
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.589043 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.593489 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe","Type":"ContainerDied","Data":"a62e70ca0168f17d989bff54672941fb0a0e39ac5e2fef51469f2f146a8c6013"}
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.593580 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a62e70ca0168f17d989bff54672941fb0a0e39ac5e2fef51469f2f146a8c6013"
Jan 28 15:19:37 crc kubenswrapper[4959]: I0128 15:19:37.593680 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 15:19:38 crc kubenswrapper[4959]: I0128 15:19:38.041841 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:38 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:38 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:38 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:38 crc kubenswrapper[4959]: I0128 15:19:38.042182 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:39 crc kubenswrapper[4959]: I0128 15:19:39.017783 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:39 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:39 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:39 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:39 crc kubenswrapper[4959]: I0128 15:19:39.018308 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:40 crc kubenswrapper[4959]: I0128 15:19:40.018038 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 15:19:40 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld
Jan 28 15:19:40 crc kubenswrapper[4959]: [+]process-running ok
Jan 28 15:19:40 crc kubenswrapper[4959]: healthz check failed
Jan 28 15:19:40 crc kubenswrapper[4959]: I0128 15:19:40.018980 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 15:19:41 crc kubenswrapper[4959]: I0128 15:19:41.016518 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe
failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 15:19:41 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld Jan 28 15:19:41 crc kubenswrapper[4959]: [+]process-running ok Jan 28 15:19:41 crc kubenswrapper[4959]: healthz check failed Jan 28 15:19:41 crc kubenswrapper[4959]: I0128 15:19:41.016587 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 15:19:41 crc kubenswrapper[4959]: I0128 15:19:41.074413 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:19:41 crc kubenswrapper[4959]: I0128 15:19:41.074475 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:19:41 crc kubenswrapper[4959]: I0128 15:19:41.074515 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:19:41 crc kubenswrapper[4959]: I0128 15:19:41.074605 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:19:42 crc kubenswrapper[4959]: I0128 15:19:42.039626 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 15:19:42 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld Jan 28 15:19:42 crc kubenswrapper[4959]: [+]process-running ok Jan 28 15:19:42 crc kubenswrapper[4959]: healthz check failed Jan 28 15:19:42 crc kubenswrapper[4959]: I0128 15:19:42.040442 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 15:19:42 crc kubenswrapper[4959]: I0128 15:19:42.198713 4959 patch_prober.go:28] interesting pod/console-f9d7485db-b7ncr container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Jan 28 15:19:42 crc kubenswrapper[4959]: I0128 15:19:42.198787 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-b7ncr" podUID="0032b760-b9d9-4533-ae6c-dfe3e55d16e6" containerName="console" probeResult="failure" output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" Jan 28 15:19:43 crc kubenswrapper[4959]: I0128 
15:19:43.017846 4959 patch_prober.go:28] interesting pod/router-default-5444994796-5xm4l container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 15:19:43 crc kubenswrapper[4959]: [-]has-synced failed: reason withheld Jan 28 15:19:43 crc kubenswrapper[4959]: [+]process-running ok Jan 28 15:19:43 crc kubenswrapper[4959]: healthz check failed Jan 28 15:19:43 crc kubenswrapper[4959]: I0128 15:19:43.017926 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5xm4l" podUID="0aedd299-b681-4f9f-b92e-d2bf27be7d06" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 15:19:44 crc kubenswrapper[4959]: I0128 15:19:44.021877 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:44 crc kubenswrapper[4959]: I0128 15:19:44.027531 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-5xm4l" Jan 28 15:19:46 crc kubenswrapper[4959]: I0128 15:19:46.208547 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:46 crc kubenswrapper[4959]: I0128 15:19:46.216253 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/943bb4d7-0907-4b19-b9e0-580af6061632-metrics-certs\") pod \"network-metrics-daemon-4d9tj\" (UID: \"943bb4d7-0907-4b19-b9e0-580af6061632\") " pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:46 crc kubenswrapper[4959]: I0128 15:19:46.403228 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4d9tj" Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.076981 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.077543 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.076995 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.077675 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.077731 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.078407 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"9c86dde11baffef5192e034b43d7075397463d03af1f58cb31b0830d129fbe9c"} pod="openshift-console/downloads-7954f5f757-gqkxc" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.078496 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" containerID="cri-o://9c86dde11baffef5192e034b43d7075397463d03af1f58cb31b0830d129fbe9c" gracePeriod=2 Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.078660 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.078734 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.454921 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hr5x"] Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.455191 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" 
podUID="06bff249-845d-4278-a2e5-a2a7c54c2f41" containerName="controller-manager" containerID="cri-o://af24165060dbe0e98fe1f381d333546ba7f4675cd28dc47f80cb5289034ba401" gracePeriod=30 Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.467179 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw"] Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.467894 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" podUID="3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" containerName="route-controller-manager" containerID="cri-o://7dc244ecebf6f3fa789cefb450c713ccae24c8d588ed9c1397addcb520b03940" gracePeriod=30 Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.870889 4959 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-bpvnw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.871549 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" podUID="3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 28 15:19:51 crc kubenswrapper[4959]: E0128 15:19:51.906928 4959 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ed3dbaa_b8b3_4455_9f22_3d04817ee1a7.slice/crio-conmon-7dc244ecebf6f3fa789cefb450c713ccae24c8d588ed9c1397addcb520b03940.scope\": RecentStats: unable to find data in memory cache]" Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.914586 4959 generic.go:334] "Generic (PLEG): container finished" podID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerID="9c86dde11baffef5192e034b43d7075397463d03af1f58cb31b0830d129fbe9c" exitCode=0 Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.914672 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-gqkxc" event={"ID":"e84ed88e-eaf6-433c-b930-93f13ed09fcf","Type":"ContainerDied","Data":"9c86dde11baffef5192e034b43d7075397463d03af1f58cb31b0830d129fbe9c"} Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.916331 4959 generic.go:334] "Generic (PLEG): container finished" podID="3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" containerID="7dc244ecebf6f3fa789cefb450c713ccae24c8d588ed9c1397addcb520b03940" exitCode=0 Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.916401 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" event={"ID":"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7","Type":"ContainerDied","Data":"7dc244ecebf6f3fa789cefb450c713ccae24c8d588ed9c1397addcb520b03940"} Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.917641 4959 generic.go:334] "Generic (PLEG): container finished" podID="06bff249-845d-4278-a2e5-a2a7c54c2f41" containerID="af24165060dbe0e98fe1f381d333546ba7f4675cd28dc47f80cb5289034ba401" exitCode=0 Jan 28 15:19:51 crc kubenswrapper[4959]: I0128 15:19:51.917676 4959 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" event={"ID":"06bff249-845d-4278-a2e5-a2a7c54c2f41","Type":"ContainerDied","Data":"af24165060dbe0e98fe1f381d333546ba7f4675cd28dc47f80cb5289034ba401"} Jan 28 15:19:52 crc kubenswrapper[4959]: I0128 15:19:52.143177 4959 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2hr5x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 28 15:19:52 crc kubenswrapper[4959]: I0128 15:19:52.143924 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" podUID="06bff249-845d-4278-a2e5-a2a7c54c2f41" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.11:8443/healthz\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 28 15:19:52 crc kubenswrapper[4959]: I0128 15:19:52.273957 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:52 crc kubenswrapper[4959]: I0128 15:19:52.278126 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:19:54 crc kubenswrapper[4959]: I0128 15:19:54.109539 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.010184 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.015394 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.043566 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g"] Jan 28 15:19:57 crc kubenswrapper[4959]: E0128 15:19:57.043880 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c59b895-c9c4-4775-9502-d8eedd19e0b9" containerName="pruner" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.043897 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c59b895-c9c4-4775-9502-d8eedd19e0b9" containerName="pruner" Jan 28 15:19:57 crc kubenswrapper[4959]: E0128 15:19:57.043945 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" containerName="route-controller-manager" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.043954 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" containerName="route-controller-manager" Jan 28 15:19:57 crc kubenswrapper[4959]: E0128 15:19:57.043964 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06bff249-845d-4278-a2e5-a2a7c54c2f41" containerName="controller-manager" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.043971 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="06bff249-845d-4278-a2e5-a2a7c54c2f41" containerName="controller-manager" Jan 28 15:19:57 crc kubenswrapper[4959]: E0128 15:19:57.044017 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe" containerName="pruner" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.044029 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe" containerName="pruner" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.044197 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b68f297-7b99-4a44-a5c1-d9f3e5ba5ffe" containerName="pruner" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.044213 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" containerName="route-controller-manager" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.044229 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="06bff249-845d-4278-a2e5-a2a7c54c2f41" containerName="controller-manager" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.044240 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c59b895-c9c4-4775-9502-d8eedd19e0b9" containerName="pruner" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.045030 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.062007 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g"] Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.064804 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-client-ca\") pod \"06bff249-845d-4278-a2e5-a2a7c54c2f41\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.064911 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-proxy-ca-bundles\") pod \"06bff249-845d-4278-a2e5-a2a7c54c2f41\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.064976 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/06bff249-845d-4278-a2e5-a2a7c54c2f41-serving-cert\") pod \"06bff249-845d-4278-a2e5-a2a7c54c2f41\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.065069 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-config\") pod \"06bff249-845d-4278-a2e5-a2a7c54c2f41\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.065133 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkgqm\" (UniqueName: \"kubernetes.io/projected/06bff249-845d-4278-a2e5-a2a7c54c2f41-kube-api-access-jkgqm\") pod \"06bff249-845d-4278-a2e5-a2a7c54c2f41\" (UID: \"06bff249-845d-4278-a2e5-a2a7c54c2f41\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.068274 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "06bff249-845d-4278-a2e5-a2a7c54c2f41" (UID: "06bff249-845d-4278-a2e5-a2a7c54c2f41"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.068355 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-client-ca" (OuterVolumeSpecName: "client-ca") pod "06bff249-845d-4278-a2e5-a2a7c54c2f41" (UID: "06bff249-845d-4278-a2e5-a2a7c54c2f41"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.069881 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-config" (OuterVolumeSpecName: "config") pod "06bff249-845d-4278-a2e5-a2a7c54c2f41" (UID: "06bff249-845d-4278-a2e5-a2a7c54c2f41"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.073564 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06bff249-845d-4278-a2e5-a2a7c54c2f41-kube-api-access-jkgqm" (OuterVolumeSpecName: "kube-api-access-jkgqm") pod "06bff249-845d-4278-a2e5-a2a7c54c2f41" (UID: "06bff249-845d-4278-a2e5-a2a7c54c2f41"). InnerVolumeSpecName "kube-api-access-jkgqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.081405 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06bff249-845d-4278-a2e5-a2a7c54c2f41-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "06bff249-845d-4278-a2e5-a2a7c54c2f41" (UID: "06bff249-845d-4278-a2e5-a2a7c54c2f41"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.166412 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-client-ca\") pod \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.166596 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-serving-cert\") pod \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.166637 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-config\") pod \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.166705 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2whj7\" (UniqueName: \"kubernetes.io/projected/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-kube-api-access-2whj7\") pod \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\" (UID: \"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7\") " Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.166906 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s797f\" (UniqueName: \"kubernetes.io/projected/41a63f0b-93c0-4c28-b490-e92daa53f408-kube-api-access-s797f\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.166971 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-proxy-ca-bundles\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.166999 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-client-ca\") pod 
\"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167020 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41a63f0b-93c0-4c28-b490-e92daa53f408-serving-cert\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167068 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-config\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167154 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167175 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167188 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/06bff249-845d-4278-a2e5-a2a7c54c2f41-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167200 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06bff249-845d-4278-a2e5-a2a7c54c2f41-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167213 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkgqm\" (UniqueName: \"kubernetes.io/projected/06bff249-845d-4278-a2e5-a2a7c54c2f41-kube-api-access-jkgqm\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167876 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-client-ca" (OuterVolumeSpecName: "client-ca") pod "3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" (UID: "3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.167881 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-config" (OuterVolumeSpecName: "config") pod "3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" (UID: "3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.170085 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" (UID: "3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.170607 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-kube-api-access-2whj7" (OuterVolumeSpecName: "kube-api-access-2whj7") pod "3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" (UID: "3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7"). InnerVolumeSpecName "kube-api-access-2whj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268346 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s797f\" (UniqueName: \"kubernetes.io/projected/41a63f0b-93c0-4c28-b490-e92daa53f408-kube-api-access-s797f\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268436 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-proxy-ca-bundles\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268476 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-client-ca\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268496 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41a63f0b-93c0-4c28-b490-e92daa53f408-serving-cert\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268514 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-config\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268583 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268596 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268606 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.268618 4959 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-2whj7\" (UniqueName: \"kubernetes.io/projected/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7-kube-api-access-2whj7\") on node \"crc\" DevicePath \"\"" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.270521 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-client-ca\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.270767 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-proxy-ca-bundles\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.271995 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-config\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.273405 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41a63f0b-93c0-4c28-b490-e92daa53f408-serving-cert\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.284998 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s797f\" (UniqueName: \"kubernetes.io/projected/41a63f0b-93c0-4c28-b490-e92daa53f408-kube-api-access-s797f\") pod \"controller-manager-75b69fd7b6-lgj9g\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.370608 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.963335 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" event={"ID":"06bff249-845d-4278-a2e5-a2a7c54c2f41","Type":"ContainerDied","Data":"3b4cb9c0f8c8747cf8386628dca9b966ad0be163c57293802a1ba8fe6699f298"} Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.963399 4959 scope.go:117] "RemoveContainer" containerID="af24165060dbe0e98fe1f381d333546ba7f4675cd28dc47f80cb5289034ba401" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.964508 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2hr5x" Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.965718 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" event={"ID":"3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7","Type":"ContainerDied","Data":"3a88b05fb2f139fba957ce783d74a6464462bbd98677776b6fb34e5d8ed52c9d"} Jan 28 15:19:57 crc kubenswrapper[4959]: I0128 15:19:57.965799 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw" Jan 28 15:19:58 crc kubenswrapper[4959]: I0128 15:19:58.005283 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw"] Jan 28 15:19:58 crc kubenswrapper[4959]: I0128 15:19:58.009872 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bpvnw"] Jan 28 15:19:58 crc kubenswrapper[4959]: I0128 15:19:58.015898 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hr5x"] Jan 28 15:19:58 crc kubenswrapper[4959]: I0128 15:19:58.019055 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2hr5x"] Jan 28 15:19:58 crc kubenswrapper[4959]: I0128 15:19:58.595136 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06bff249-845d-4278-a2e5-a2a7c54c2f41" path="/var/lib/kubelet/pods/06bff249-845d-4278-a2e5-a2a7c54c2f41/volumes" Jan 28 15:19:58 crc kubenswrapper[4959]: I0128 15:19:58.595978 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7" path="/var/lib/kubelet/pods/3ed3dbaa-b8b3-4455-9f22-3d04817ee1a7/volumes" Jan 28 15:19:58 crc kubenswrapper[4959]: I0128 15:19:58.688974 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:19:58 crc kubenswrapper[4959]: I0128 15:19:58.689069 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.091406 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x"] Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.092174 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.095546 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.096392 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.096607 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.096824 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.093975 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.100501 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.101667 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x"] Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.198471 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79de08f6-bcf7-4edc-ad40-93d4de1329ab-serving-cert\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.198542 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-config\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.198572 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9t8f\" (UniqueName: \"kubernetes.io/projected/79de08f6-bcf7-4edc-ad40-93d4de1329ab-kube-api-access-l9t8f\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.198711 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-client-ca\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.299700 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-client-ca\") pod 
\"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.299749 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79de08f6-bcf7-4edc-ad40-93d4de1329ab-serving-cert\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.299783 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-config\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.299806 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9t8f\" (UniqueName: \"kubernetes.io/projected/79de08f6-bcf7-4edc-ad40-93d4de1329ab-kube-api-access-l9t8f\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.301055 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-client-ca\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.301296 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-config\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.307170 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79de08f6-bcf7-4edc-ad40-93d4de1329ab-serving-cert\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.316911 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9t8f\" (UniqueName: \"kubernetes.io/projected/79de08f6-bcf7-4edc-ad40-93d4de1329ab-kube-api-access-l9t8f\") pod \"route-controller-manager-7965ddf678-wnq9x\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:19:59 crc kubenswrapper[4959]: I0128 15:19:59.461467 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:20:01 crc kubenswrapper[4959]: I0128 15:20:01.076196 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:20:01 crc kubenswrapper[4959]: I0128 15:20:01.076633 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:20:02 crc kubenswrapper[4959]: I0128 15:20:02.108709 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-mszk6" Jan 28 15:20:07 crc kubenswrapper[4959]: E0128 15:20:07.785361 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 28 15:20:07 crc kubenswrapper[4959]: E0128 15:20:07.786815 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xmgg9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-vd8kz_openshift-marketplace(ef01ad95-eb28-48c8-9d58-3ea696164442): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 15:20:07 crc kubenswrapper[4959]: E0128 15:20:07.788090 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" 
pod="openshift-marketplace/redhat-marketplace-vd8kz" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" Jan 28 15:20:08 crc kubenswrapper[4959]: I0128 15:20:08.723794 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 15:20:10 crc kubenswrapper[4959]: E0128 15:20:10.444370 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-vd8kz" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" Jan 28 15:20:11 crc kubenswrapper[4959]: I0128 15:20:11.075562 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:20:11 crc kubenswrapper[4959]: I0128 15:20:11.075655 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:20:11 crc kubenswrapper[4959]: I0128 15:20:11.428890 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g"] Jan 28 15:20:11 crc kubenswrapper[4959]: I0128 15:20:11.564695 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x"] Jan 28 15:20:13 crc kubenswrapper[4959]: E0128 15:20:13.617276 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 15:20:13 crc kubenswrapper[4959]: E0128 15:20:13.617502 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cknrm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-8ckgq_openshift-marketplace(48ead489-a40c-4d3c-a18f-83287043b523): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 15:20:13 crc kubenswrapper[4959]: E0128 15:20:13.618744 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-8ckgq" podUID="48ead489-a40c-4d3c-a18f-83287043b523" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.455296 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.457454 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.460247 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.460876 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.461338 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.522839 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6905576b-e5ea-424f-8060-d4e2d3ceef14-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6905576b-e5ea-424f-8060-d4e2d3ceef14\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.523000 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6905576b-e5ea-424f-8060-d4e2d3ceef14-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6905576b-e5ea-424f-8060-d4e2d3ceef14\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.624639 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6905576b-e5ea-424f-8060-d4e2d3ceef14-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6905576b-e5ea-424f-8060-d4e2d3ceef14\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.624735 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6905576b-e5ea-424f-8060-d4e2d3ceef14-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6905576b-e5ea-424f-8060-d4e2d3ceef14\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.625308 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6905576b-e5ea-424f-8060-d4e2d3ceef14-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6905576b-e5ea-424f-8060-d4e2d3ceef14\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.644439 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6905576b-e5ea-424f-8060-d4e2d3ceef14-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6905576b-e5ea-424f-8060-d4e2d3ceef14\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:14 crc kubenswrapper[4959]: I0128 15:20:14.783505 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:15 crc kubenswrapper[4959]: E0128 15:20:15.257054 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-8ckgq" podUID="48ead489-a40c-4d3c-a18f-83287043b523" Jan 28 15:20:15 crc kubenswrapper[4959]: E0128 15:20:15.541244 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 28 15:20:15 crc kubenswrapper[4959]: E0128 15:20:15.541456 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j6549,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-r74c4_openshift-marketplace(13b2e969-f501-4266-bcf2-76514bf739c2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 15:20:15 crc kubenswrapper[4959]: E0128 15:20:15.542669 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-r74c4" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" Jan 28 15:20:20 crc kubenswrapper[4959]: E0128 15:20:20.101494 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-r74c4" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.194615 4959 
scope.go:117] "RemoveContainer" containerID="7dc244ecebf6f3fa789cefb450c713ccae24c8d588ed9c1397addcb520b03940" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.222006 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.223051 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.255831 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.312277 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/beb38771-a14f-473a-9e2b-c9585abb58dc-kube-api-access\") pod \"installer-9-crc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.312366 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-var-lock\") pod \"installer-9-crc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.312435 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-kubelet-dir\") pod \"installer-9-crc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.326150 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4d9tj"] Jan 28 15:20:20 crc kubenswrapper[4959]: W0128 15:20:20.342732 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod943bb4d7_0907_4b19_b9e0_580af6061632.slice/crio-d906b28eda925f8b43e315269df2d8b29aa60b031aa84a95fae363f9744f4034 WatchSource:0}: Error finding container d906b28eda925f8b43e315269df2d8b29aa60b031aa84a95fae363f9744f4034: Status 404 returned error can't find the container with id d906b28eda925f8b43e315269df2d8b29aa60b031aa84a95fae363f9744f4034 Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.414437 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/beb38771-a14f-473a-9e2b-c9585abb58dc-kube-api-access\") pod \"installer-9-crc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.414556 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-var-lock\") pod \"installer-9-crc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.414666 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-kubelet-dir\") pod \"installer-9-crc\" (UID: 
\"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.414804 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-kubelet-dir\") pod \"installer-9-crc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.414863 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-var-lock\") pod \"installer-9-crc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.435544 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/beb38771-a14f-473a-9e2b-c9585abb58dc-kube-api-access\") pod \"installer-9-crc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.546496 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 15:20:20 crc kubenswrapper[4959]: W0128 15:20:20.552390 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod6905576b_e5ea_424f_8060_d4e2d3ceef14.slice/crio-6a60e915aff71143cade18be05e00f3de47f4977e863ec16aba8b9264c7c446a WatchSource:0}: Error finding container 6a60e915aff71143cade18be05e00f3de47f4977e863ec16aba8b9264c7c446a: Status 404 returned error can't find the container with id 6a60e915aff71143cade18be05e00f3de47f4977e863ec16aba8b9264c7c446a Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.628340 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.673577 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g"] Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.676990 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x"] Jan 28 15:20:20 crc kubenswrapper[4959]: W0128 15:20:20.677123 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41a63f0b_93c0_4c28_b490_e92daa53f408.slice/crio-bb27aed3444fb67088dfb4783337fdaad742eaf88575754f163e7a7448e90afb WatchSource:0}: Error finding container bb27aed3444fb67088dfb4783337fdaad742eaf88575754f163e7a7448e90afb: Status 404 returned error can't find the container with id bb27aed3444fb67088dfb4783337fdaad742eaf88575754f163e7a7448e90afb Jan 28 15:20:20 crc kubenswrapper[4959]: I0128 15:20:20.870829 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 15:20:20 crc kubenswrapper[4959]: W0128 15:20:20.874242 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podbeb38771_a14f_473a_9e2b_c9585abb58dc.slice/crio-0de0ed828c41581e21508149e51763a75bddb9d017e55b5fa3f829ecf7b1c53f WatchSource:0}: Error finding container 0de0ed828c41581e21508149e51763a75bddb9d017e55b5fa3f829ecf7b1c53f: Status 404 returned error can't find the container with id 0de0ed828c41581e21508149e51763a75bddb9d017e55b5fa3f829ecf7b1c53f Jan 28 15:20:21 crc kubenswrapper[4959]: I0128 15:20:21.074374 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:20:21 crc kubenswrapper[4959]: I0128 15:20:21.074460 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:20:21 crc kubenswrapper[4959]: I0128 15:20:21.123137 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" event={"ID":"943bb4d7-0907-4b19-b9e0-580af6061632","Type":"ContainerStarted","Data":"d906b28eda925f8b43e315269df2d8b29aa60b031aa84a95fae363f9744f4034"} Jan 28 15:20:21 crc kubenswrapper[4959]: I0128 15:20:21.124479 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" event={"ID":"41a63f0b-93c0-4c28-b490-e92daa53f408","Type":"ContainerStarted","Data":"bb27aed3444fb67088dfb4783337fdaad742eaf88575754f163e7a7448e90afb"} Jan 28 15:20:21 crc kubenswrapper[4959]: I0128 15:20:21.125998 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"beb38771-a14f-473a-9e2b-c9585abb58dc","Type":"ContainerStarted","Data":"0de0ed828c41581e21508149e51763a75bddb9d017e55b5fa3f829ecf7b1c53f"} Jan 28 15:20:21 crc kubenswrapper[4959]: I0128 15:20:21.129142 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" 
event={"ID":"6905576b-e5ea-424f-8060-d4e2d3ceef14","Type":"ContainerStarted","Data":"6a60e915aff71143cade18be05e00f3de47f4977e863ec16aba8b9264c7c446a"} Jan 28 15:20:21 crc kubenswrapper[4959]: I0128 15:20:21.130298 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" event={"ID":"79de08f6-bcf7-4edc-ad40-93d4de1329ab","Type":"ContainerStarted","Data":"fb3a4b7b1b92a855dea4fad1615d503ed1d9fabe99c5fa6ea4fef494bb34f154"} Jan 28 15:20:21 crc kubenswrapper[4959]: E0128 15:20:21.719908 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 15:20:21 crc kubenswrapper[4959]: E0128 15:20:21.720263 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tr95z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-dz2cj_openshift-marketplace(288654ae-ff9c-4ab8-999a-29ca0266da2a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 15:20:21 crc kubenswrapper[4959]: E0128 15:20:21.721535 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-dz2cj" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" Jan 28 15:20:21 crc kubenswrapper[4959]: E0128 15:20:21.849362 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 15:20:21 crc kubenswrapper[4959]: E0128 15:20:21.849585 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init 
container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jhlh2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-2tkvn_openshift-marketplace(575b26ae-87aa-469e-9bd9-1b4384d80093): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 15:20:21 crc kubenswrapper[4959]: E0128 15:20:21.850729 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-2tkvn" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.144040 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-gqkxc" event={"ID":"e84ed88e-eaf6-433c-b930-93f13ed09fcf","Type":"ContainerStarted","Data":"38392eb78c48a8f5d067907049f101e02dae6fd944081d956a6d3f8437fb2573"} Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.145639 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.145744 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.145791 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.147317 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/network-metrics-daemon-4d9tj" event={"ID":"943bb4d7-0907-4b19-b9e0-580af6061632","Type":"ContainerStarted","Data":"52c54009c7e26edf7de24a68e424bdcf3499ee90abd5897214d90f7bfb15d049"} Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.149211 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"beb38771-a14f-473a-9e2b-c9585abb58dc","Type":"ContainerStarted","Data":"6217bd860b9ca7c69120c91eae5ffde7737f795749b394ecbf501ffd46dade00"} Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.154766 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" event={"ID":"41a63f0b-93c0-4c28-b490-e92daa53f408","Type":"ContainerStarted","Data":"2a6c7a509f4ae08fdd998f4e592f3898e64998b62897ec24c06a6726cf35a0e9"} Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.155121 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" podUID="41a63f0b-93c0-4c28-b490-e92daa53f408" containerName="controller-manager" containerID="cri-o://2a6c7a509f4ae08fdd998f4e592f3898e64998b62897ec24c06a6726cf35a0e9" gracePeriod=30 Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.155722 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.157054 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"6905576b-e5ea-424f-8060-d4e2d3ceef14","Type":"ContainerStarted","Data":"907ba3d0cdd6fde8abddab42bf1c4e19e9366b0a9f9a6d61190cfb41ebfacbc3"} Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.166184 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.169037 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" podUID="79de08f6-bcf7-4edc-ad40-93d4de1329ab" containerName="route-controller-manager" containerID="cri-o://e1eaa360e69834375a5b95d21ed0ed563652d892feb84bd35e543ef4d066ab52" gracePeriod=30 Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.169163 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" event={"ID":"79de08f6-bcf7-4edc-ad40-93d4de1329ab","Type":"ContainerStarted","Data":"e1eaa360e69834375a5b95d21ed0ed563652d892feb84bd35e543ef4d066ab52"} Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.170018 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.178967 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-2tkvn" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.179090 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: 
\"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-dz2cj" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.183382 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.238396 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.238321066 podStartE2EDuration="2.238321066s" podCreationTimestamp="2026-01-28 15:20:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:22.237546316 +0000 UTC m=+205.683452709" watchObservedRunningTime="2026-01-28 15:20:22.238321066 +0000 UTC m=+205.684227469" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.260483 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=8.260436319 podStartE2EDuration="8.260436319s" podCreationTimestamp="2026-01-28 15:20:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:22.25969718 +0000 UTC m=+205.705603563" watchObservedRunningTime="2026-01-28 15:20:22.260436319 +0000 UTC m=+205.706342702" Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.282343 4959 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41a63f0b_93c0_4c28_b490_e92daa53f408.slice/crio-2a6c7a509f4ae08fdd998f4e592f3898e64998b62897ec24c06a6726cf35a0e9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79de08f6_bcf7_4edc_ad40_93d4de1329ab.slice/crio-e1eaa360e69834375a5b95d21ed0ed563652d892feb84bd35e543ef4d066ab52.scope\": RecentStats: unable to find data in memory cache]" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.287132 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" podStartSLOduration=31.287094227 podStartE2EDuration="31.287094227s" podCreationTimestamp="2026-01-28 15:19:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:22.285502286 +0000 UTC m=+205.731408679" watchObservedRunningTime="2026-01-28 15:20:22.287094227 +0000 UTC m=+205.733000610" Jan 28 15:20:22 crc kubenswrapper[4959]: I0128 15:20:22.302936 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" podStartSLOduration=31.302911292 podStartE2EDuration="31.302911292s" podCreationTimestamp="2026-01-28 15:19:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:22.302585674 +0000 UTC m=+205.748492087" watchObservedRunningTime="2026-01-28 15:20:22.302911292 +0000 UTC m=+205.748817675" Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.889233 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = 
Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.889233 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.889528 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bpzst,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-dkw87_openshift-marketplace(9058c198-cfe2-496a-b045-d3650a0a36bf): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.893212 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-dkw87" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf"
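The &Container{...} blobs in the Unhandled Error entries above and below are the kubelet's Go rendering of the extract-content init container that each openshift-marketplace catalog pod runs. Rewritten with k8s.io/api/core/v1 types it would look roughly like the sketch below, with field values copied from the redhat-marketplace-dkw87 dump just above; only the helper function name is invented. The ImagePullPolicy of Always in that spec is why every sync attempt goes back to the registry and ends in ErrImagePull or ImagePullBackOff while the index copy keeps being canceled.

// Rough k8s.io/api/core/v1 equivalent of the &Container{...} dump above; a sketch, not kubelet code.
package main

import corev1 "k8s.io/api/core/v1"

func extractContentInitContainer() corev1.Container {
	runAsUser := int64(1000170000)
	runAsNonRoot := true
	allowPrivEsc := false
	return corev1.Container{
		Name:    "extract-content",
		Image:   "registry.redhat.io/redhat/redhat-marketplace-index:v4.18",
		Command: []string{"/utilities/copy-content"},
		Args: []string{
			"--catalog.from=/configs", "--catalog.to=/extracted-catalog/catalog",
			"--cache.from=/tmp/cache", "--cache.to=/extracted-catalog/cache",
		},
		VolumeMounts: []corev1.VolumeMount{
			{Name: "utilities", MountPath: "/utilities"},
			{Name: "catalog-content", MountPath: "/extracted-catalog"},
			{Name: "kube-api-access-bpzst", ReadOnly: true, MountPath: "/var/run/secrets/kubernetes.io/serviceaccount"},
		},
		ImagePullPolicy:          corev1.PullAlways, // forces a registry pull on every start attempt
		TerminationMessagePath:   "/dev/termination-log",
		TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
		SecurityContext: &corev1.SecurityContext{
			Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
			RunAsUser:                &runAsUser,
			RunAsNonRoot:             &runAsNonRoot,
			AllowPrivilegeEscalation: &allowPrivEsc,
		},
	}
}

func main() { _ = extractContentInitContainer() }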
Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.969609 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.969891 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ksm4s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-zxfjz_openshift-marketplace(5b46d2a5-2d15-4841-97a6-b3768e4df1d4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 28 15:20:22 crc kubenswrapper[4959]: E0128 15:20:22.971366 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-zxfjz" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4"
Jan 28 15:20:23 crc kubenswrapper[4959]: E0128 15:20:23.052361 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Jan 28 15:20:23 crc kubenswrapper[4959]: E0128 15:20:23.052591 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t7s8j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-z8lcg_openshift-marketplace(b181a50d-3075-479c-b460-bd2addc3e6b3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 15:20:23 crc kubenswrapper[4959]: E0128 15:20:23.053855 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-z8lcg" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.175695 4959 generic.go:334] "Generic (PLEG): container finished" podID="6905576b-e5ea-424f-8060-d4e2d3ceef14" containerID="907ba3d0cdd6fde8abddab42bf1c4e19e9366b0a9f9a6d61190cfb41ebfacbc3" exitCode=0 Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.175810 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"6905576b-e5ea-424f-8060-d4e2d3ceef14","Type":"ContainerDied","Data":"907ba3d0cdd6fde8abddab42bf1c4e19e9366b0a9f9a6d61190cfb41ebfacbc3"} Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.178399 4959 generic.go:334] "Generic (PLEG): container finished" podID="79de08f6-bcf7-4edc-ad40-93d4de1329ab" containerID="e1eaa360e69834375a5b95d21ed0ed563652d892feb84bd35e543ef4d066ab52" exitCode=0 Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.178476 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" event={"ID":"79de08f6-bcf7-4edc-ad40-93d4de1329ab","Type":"ContainerDied","Data":"e1eaa360e69834375a5b95d21ed0ed563652d892feb84bd35e543ef4d066ab52"} Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.178513 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" event={"ID":"79de08f6-bcf7-4edc-ad40-93d4de1329ab","Type":"ContainerDied","Data":"fb3a4b7b1b92a855dea4fad1615d503ed1d9fabe99c5fa6ea4fef494bb34f154"} Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 
15:20:23.178527 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb3a4b7b1b92a855dea4fad1615d503ed1d9fabe99c5fa6ea4fef494bb34f154" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.180998 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4d9tj" event={"ID":"943bb4d7-0907-4b19-b9e0-580af6061632","Type":"ContainerStarted","Data":"45823e072461d17bbd70c58059b9400337ed40e4656deabf72f44ee895f73976"} Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.184338 4959 generic.go:334] "Generic (PLEG): container finished" podID="41a63f0b-93c0-4c28-b490-e92daa53f408" containerID="2a6c7a509f4ae08fdd998f4e592f3898e64998b62897ec24c06a6726cf35a0e9" exitCode=0 Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.185552 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" event={"ID":"41a63f0b-93c0-4c28-b490-e92daa53f408","Type":"ContainerDied","Data":"2a6c7a509f4ae08fdd998f4e592f3898e64998b62897ec24c06a6726cf35a0e9"} Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.185595 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" event={"ID":"41a63f0b-93c0-4c28-b490-e92daa53f408","Type":"ContainerDied","Data":"bb27aed3444fb67088dfb4783337fdaad742eaf88575754f163e7a7448e90afb"} Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.185606 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb27aed3444fb67088dfb4783337fdaad742eaf88575754f163e7a7448e90afb" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.186915 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.186948 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:20:23 crc kubenswrapper[4959]: E0128 15:20:23.187290 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-zxfjz" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" Jan 28 15:20:23 crc kubenswrapper[4959]: E0128 15:20:23.187419 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-z8lcg" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" Jan 28 15:20:23 crc kubenswrapper[4959]: E0128 15:20:23.187556 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-dkw87" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" Jan 28 15:20:23 crc 
kubenswrapper[4959]: I0128 15:20:23.228345 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.235976 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.260825 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-4d9tj" podStartSLOduration=180.260800312 podStartE2EDuration="3m0.260800312s" podCreationTimestamp="2026-01-28 15:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:23.258649478 +0000 UTC m=+206.704555871" watchObservedRunningTime="2026-01-28 15:20:23.260800312 +0000 UTC m=+206.706706695" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.261009 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79de08f6-bcf7-4edc-ad40-93d4de1329ab-serving-cert\") pod \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.261176 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-client-ca\") pod \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.261249 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-config\") pod \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.261278 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9t8f\" (UniqueName: \"kubernetes.io/projected/79de08f6-bcf7-4edc-ad40-93d4de1329ab-kube-api-access-l9t8f\") pod \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\" (UID: \"79de08f6-bcf7-4edc-ad40-93d4de1329ab\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.262629 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-client-ca" (OuterVolumeSpecName: "client-ca") pod "79de08f6-bcf7-4edc-ad40-93d4de1329ab" (UID: "79de08f6-bcf7-4edc-ad40-93d4de1329ab"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.263635 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-config" (OuterVolumeSpecName: "config") pod "79de08f6-bcf7-4edc-ad40-93d4de1329ab" (UID: "79de08f6-bcf7-4edc-ad40-93d4de1329ab"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.269400 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79de08f6-bcf7-4edc-ad40-93d4de1329ab-kube-api-access-l9t8f" (OuterVolumeSpecName: "kube-api-access-l9t8f") pod "79de08f6-bcf7-4edc-ad40-93d4de1329ab" (UID: "79de08f6-bcf7-4edc-ad40-93d4de1329ab"). InnerVolumeSpecName "kube-api-access-l9t8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.270517 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79de08f6-bcf7-4edc-ad40-93d4de1329ab-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "79de08f6-bcf7-4edc-ad40-93d4de1329ab" (UID: "79de08f6-bcf7-4edc-ad40-93d4de1329ab"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.311131 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-78d4c99685-nm45j"] Jan 28 15:20:23 crc kubenswrapper[4959]: E0128 15:20:23.311407 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41a63f0b-93c0-4c28-b490-e92daa53f408" containerName="controller-manager" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.311421 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="41a63f0b-93c0-4c28-b490-e92daa53f408" containerName="controller-manager" Jan 28 15:20:23 crc kubenswrapper[4959]: E0128 15:20:23.311446 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79de08f6-bcf7-4edc-ad40-93d4de1329ab" containerName="route-controller-manager" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.311453 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="79de08f6-bcf7-4edc-ad40-93d4de1329ab" containerName="route-controller-manager" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.311545 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="41a63f0b-93c0-4c28-b490-e92daa53f408" containerName="controller-manager" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.311562 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="79de08f6-bcf7-4edc-ad40-93d4de1329ab" containerName="route-controller-manager" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.312009 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.332062 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c99685-nm45j"] Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.362745 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-client-ca\") pod \"41a63f0b-93c0-4c28-b490-e92daa53f408\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.362852 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-proxy-ca-bundles\") pod \"41a63f0b-93c0-4c28-b490-e92daa53f408\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.362901 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s797f\" (UniqueName: \"kubernetes.io/projected/41a63f0b-93c0-4c28-b490-e92daa53f408-kube-api-access-s797f\") pod \"41a63f0b-93c0-4c28-b490-e92daa53f408\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.362948 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41a63f0b-93c0-4c28-b490-e92daa53f408-serving-cert\") pod \"41a63f0b-93c0-4c28-b490-e92daa53f408\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.362982 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-config\") pod \"41a63f0b-93c0-4c28-b490-e92daa53f408\" (UID: \"41a63f0b-93c0-4c28-b490-e92daa53f408\") " Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363222 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0752de24-139e-4646-b96b-c8fa75e3ac6f-serving-cert\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363303 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-config\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363332 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-client-ca\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363361 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lscfk\" (UniqueName: 
\"kubernetes.io/projected/0752de24-139e-4646-b96b-c8fa75e3ac6f-kube-api-access-lscfk\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363479 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-proxy-ca-bundles\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363543 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79de08f6-bcf7-4edc-ad40-93d4de1329ab-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363656 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363730 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79de08f6-bcf7-4edc-ad40-93d4de1329ab-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363755 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9t8f\" (UniqueName: \"kubernetes.io/projected/79de08f6-bcf7-4edc-ad40-93d4de1329ab-kube-api-access-l9t8f\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.363963 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-client-ca" (OuterVolumeSpecName: "client-ca") pod "41a63f0b-93c0-4c28-b490-e92daa53f408" (UID: "41a63f0b-93c0-4c28-b490-e92daa53f408"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.364082 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "41a63f0b-93c0-4c28-b490-e92daa53f408" (UID: "41a63f0b-93c0-4c28-b490-e92daa53f408"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.364167 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-config" (OuterVolumeSpecName: "config") pod "41a63f0b-93c0-4c28-b490-e92daa53f408" (UID: "41a63f0b-93c0-4c28-b490-e92daa53f408"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.374084 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a63f0b-93c0-4c28-b490-e92daa53f408-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "41a63f0b-93c0-4c28-b490-e92daa53f408" (UID: "41a63f0b-93c0-4c28-b490-e92daa53f408"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.374094 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41a63f0b-93c0-4c28-b490-e92daa53f408-kube-api-access-s797f" (OuterVolumeSpecName: "kube-api-access-s797f") pod "41a63f0b-93c0-4c28-b490-e92daa53f408" (UID: "41a63f0b-93c0-4c28-b490-e92daa53f408"). InnerVolumeSpecName "kube-api-access-s797f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465408 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-proxy-ca-bundles\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465507 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0752de24-139e-4646-b96b-c8fa75e3ac6f-serving-cert\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465556 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-config\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465581 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-client-ca\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465602 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lscfk\" (UniqueName: \"kubernetes.io/projected/0752de24-139e-4646-b96b-c8fa75e3ac6f-kube-api-access-lscfk\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465678 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465691 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s797f\" (UniqueName: \"kubernetes.io/projected/41a63f0b-93c0-4c28-b490-e92daa53f408-kube-api-access-s797f\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465705 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41a63f0b-93c0-4c28-b490-e92daa53f408-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465714 4959 reconciler_common.go:293] "Volume detached for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.465725 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41a63f0b-93c0-4c28-b490-e92daa53f408-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.467342 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-proxy-ca-bundles\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.468372 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-client-ca\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.468896 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-config\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.471308 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0752de24-139e-4646-b96b-c8fa75e3ac6f-serving-cert\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.483770 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lscfk\" (UniqueName: \"kubernetes.io/projected/0752de24-139e-4646-b96b-c8fa75e3ac6f-kube-api-access-lscfk\") pod \"controller-manager-78d4c99685-nm45j\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") " pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.631361 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:23 crc kubenswrapper[4959]: I0128 15:20:23.849228 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c99685-nm45j"] Jan 28 15:20:23 crc kubenswrapper[4959]: W0128 15:20:23.854739 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0752de24_139e_4646_b96b_c8fa75e3ac6f.slice/crio-660d7c15469bd5f71a59488bb08cdbd68a49499bf236fc43a0841db0060be704 WatchSource:0}: Error finding container 660d7c15469bd5f71a59488bb08cdbd68a49499bf236fc43a0841db0060be704: Status 404 returned error can't find the container with id 660d7c15469bd5f71a59488bb08cdbd68a49499bf236fc43a0841db0060be704 Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.191808 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.193001 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" event={"ID":"0752de24-139e-4646-b96b-c8fa75e3ac6f","Type":"ContainerStarted","Data":"3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1"} Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.193135 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" event={"ID":"0752de24-139e-4646-b96b-c8fa75e3ac6f","Type":"ContainerStarted","Data":"660d7c15469bd5f71a59488bb08cdbd68a49499bf236fc43a0841db0060be704"} Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.193264 4959 patch_prober.go:28] interesting pod/downloads-7954f5f757-gqkxc container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.193310 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.193323 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-gqkxc" podUID="e84ed88e-eaf6-433c-b930-93f13ed09fcf" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.194018 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.202078 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.265092 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" podStartSLOduration=13.265064163 podStartE2EDuration="13.265064163s" podCreationTimestamp="2026-01-28 15:20:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:24.262431467 +0000 UTC m=+207.708337860" watchObservedRunningTime="2026-01-28 15:20:24.265064163 +0000 UTC m=+207.710970546" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.317984 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g"] Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.330995 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-75b69fd7b6-lgj9g"] Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.363054 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x"] Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.367530 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7965ddf678-wnq9x"] Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.597459 
4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41a63f0b-93c0-4c28-b490-e92daa53f408" path="/var/lib/kubelet/pods/41a63f0b-93c0-4c28-b490-e92daa53f408/volumes" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.598051 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79de08f6-bcf7-4edc-ad40-93d4de1329ab" path="/var/lib/kubelet/pods/79de08f6-bcf7-4edc-ad40-93d4de1329ab/volumes" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.625918 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.685950 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6905576b-e5ea-424f-8060-d4e2d3ceef14-kubelet-dir\") pod \"6905576b-e5ea-424f-8060-d4e2d3ceef14\" (UID: \"6905576b-e5ea-424f-8060-d4e2d3ceef14\") " Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.686138 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6905576b-e5ea-424f-8060-d4e2d3ceef14-kube-api-access\") pod \"6905576b-e5ea-424f-8060-d4e2d3ceef14\" (UID: \"6905576b-e5ea-424f-8060-d4e2d3ceef14\") " Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.687318 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6905576b-e5ea-424f-8060-d4e2d3ceef14-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6905576b-e5ea-424f-8060-d4e2d3ceef14" (UID: "6905576b-e5ea-424f-8060-d4e2d3ceef14"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.712247 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6905576b-e5ea-424f-8060-d4e2d3ceef14-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6905576b-e5ea-424f-8060-d4e2d3ceef14" (UID: "6905576b-e5ea-424f-8060-d4e2d3ceef14"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.787570 4959 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6905576b-e5ea-424f-8060-d4e2d3ceef14-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:24 crc kubenswrapper[4959]: I0128 15:20:24.787615 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6905576b-e5ea-424f-8060-d4e2d3ceef14-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:25 crc kubenswrapper[4959]: I0128 15:20:25.201479 4959 generic.go:334] "Generic (PLEG): container finished" podID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerID="415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206" exitCode=0 Jan 28 15:20:25 crc kubenswrapper[4959]: I0128 15:20:25.201746 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vd8kz" event={"ID":"ef01ad95-eb28-48c8-9d58-3ea696164442","Type":"ContainerDied","Data":"415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206"} Jan 28 15:20:25 crc kubenswrapper[4959]: I0128 15:20:25.205748 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"6905576b-e5ea-424f-8060-d4e2d3ceef14","Type":"ContainerDied","Data":"6a60e915aff71143cade18be05e00f3de47f4977e863ec16aba8b9264c7c446a"} Jan 28 15:20:25 crc kubenswrapper[4959]: I0128 15:20:25.205830 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a60e915aff71143cade18be05e00f3de47f4977e863ec16aba8b9264c7c446a" Jan 28 15:20:25 crc kubenswrapper[4959]: I0128 15:20:25.205768 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.112598 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8"] Jan 28 15:20:26 crc kubenswrapper[4959]: E0128 15:20:26.113298 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6905576b-e5ea-424f-8060-d4e2d3ceef14" containerName="pruner" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.113314 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="6905576b-e5ea-424f-8060-d4e2d3ceef14" containerName="pruner" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.113430 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="6905576b-e5ea-424f-8060-d4e2d3ceef14" containerName="pruner" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.113867 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.116328 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.116458 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.118550 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.118705 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.118846 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.123035 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.127804 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8"] Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.208793 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-client-ca\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.208868 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plxqk\" (UniqueName: \"kubernetes.io/projected/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-kube-api-access-plxqk\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.208905 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-config\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.208952 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-serving-cert\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.219251 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vd8kz" 
event={"ID":"ef01ad95-eb28-48c8-9d58-3ea696164442","Type":"ContainerStarted","Data":"8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0"} Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.245585 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vd8kz" podStartSLOduration=2.812531274 podStartE2EDuration="54.245559303s" podCreationTimestamp="2026-01-28 15:19:32 +0000 UTC" firstStartedPulling="2026-01-28 15:19:34.358582775 +0000 UTC m=+157.804489158" lastFinishedPulling="2026-01-28 15:20:25.791610794 +0000 UTC m=+209.237517187" observedRunningTime="2026-01-28 15:20:26.239810759 +0000 UTC m=+209.685717192" watchObservedRunningTime="2026-01-28 15:20:26.245559303 +0000 UTC m=+209.691465686" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.310719 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-serving-cert\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.310790 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-client-ca\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.310874 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plxqk\" (UniqueName: \"kubernetes.io/projected/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-kube-api-access-plxqk\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.310906 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-config\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.312537 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-client-ca\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.313423 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-config\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.328868 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-serving-cert\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.330666 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plxqk\" (UniqueName: \"kubernetes.io/projected/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-kube-api-access-plxqk\") pod \"route-controller-manager-7db9b7df74-x5fm8\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") " pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.433537 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:26 crc kubenswrapper[4959]: I0128 15:20:26.953249 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8"] Jan 28 15:20:27 crc kubenswrapper[4959]: I0128 15:20:27.228860 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" event={"ID":"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807","Type":"ContainerStarted","Data":"0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3"} Jan 28 15:20:27 crc kubenswrapper[4959]: I0128 15:20:27.228928 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" event={"ID":"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807","Type":"ContainerStarted","Data":"1bdc6756eb444bfca77c9c33e9404b82dcb8db1008e7a4ca10fd557e1eed3405"} Jan 28 15:20:27 crc kubenswrapper[4959]: I0128 15:20:27.229746 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:27 crc kubenswrapper[4959]: I0128 15:20:27.254526 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" podStartSLOduration=16.25450369 podStartE2EDuration="16.25450369s" podCreationTimestamp="2026-01-28 15:20:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:27.254224844 +0000 UTC m=+210.700131247" watchObservedRunningTime="2026-01-28 15:20:27.25450369 +0000 UTC m=+210.700410063" Jan 28 15:20:27 crc kubenswrapper[4959]: I0128 15:20:27.684182 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" Jan 28 15:20:28 crc kubenswrapper[4959]: I0128 15:20:28.689245 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:20:28 crc kubenswrapper[4959]: I0128 15:20:28.690239 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:20:28 crc kubenswrapper[4959]: I0128 15:20:28.690312 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:20:28 crc kubenswrapper[4959]: I0128 15:20:28.691167 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:20:28 crc kubenswrapper[4959]: I0128 15:20:28.691432 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259" gracePeriod=600 Jan 28 15:20:29 crc kubenswrapper[4959]: I0128 15:20:29.242034 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259" exitCode=0 Jan 28 15:20:29 crc kubenswrapper[4959]: I0128 15:20:29.242121 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259"} Jan 28 15:20:29 crc kubenswrapper[4959]: I0128 15:20:29.247711 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ckgq" event={"ID":"48ead489-a40c-4d3c-a18f-83287043b523","Type":"ContainerStarted","Data":"aba459eb1a3a6dec43921aa5699b94b18bd80f12ddb33db29e15254cbc55b54b"} Jan 28 15:20:30 crc kubenswrapper[4959]: I0128 15:20:30.256630 4959 generic.go:334] "Generic (PLEG): container finished" podID="48ead489-a40c-4d3c-a18f-83287043b523" containerID="aba459eb1a3a6dec43921aa5699b94b18bd80f12ddb33db29e15254cbc55b54b" exitCode=0 Jan 28 15:20:30 crc kubenswrapper[4959]: I0128 15:20:30.256703 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ckgq" event={"ID":"48ead489-a40c-4d3c-a18f-83287043b523","Type":"ContainerDied","Data":"aba459eb1a3a6dec43921aa5699b94b18bd80f12ddb33db29e15254cbc55b54b"} Jan 28 15:20:30 crc kubenswrapper[4959]: I0128 15:20:30.264313 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"2721ee3c317d213a4abd4520e3756a7317e857dd25f041e3dd5a379f57b76dad"} Jan 28 15:20:31 crc kubenswrapper[4959]: I0128 15:20:31.090263 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-gqkxc" Jan 28 15:20:32 crc kubenswrapper[4959]: I0128 15:20:32.719715 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:20:32 crc kubenswrapper[4959]: I0128 15:20:32.720604 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:20:32 crc kubenswrapper[4959]: 
I0128 15:20:32.904552 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:20:33 crc kubenswrapper[4959]: I0128 15:20:33.293733 4959 generic.go:334] "Generic (PLEG): container finished" podID="13b2e969-f501-4266-bcf2-76514bf739c2" containerID="0b6d570b9b9c67e59f2bce9a98e7ab002f69b9d7615129ba779aab7eaf942e0c" exitCode=0 Jan 28 15:20:33 crc kubenswrapper[4959]: I0128 15:20:33.293944 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r74c4" event={"ID":"13b2e969-f501-4266-bcf2-76514bf739c2","Type":"ContainerDied","Data":"0b6d570b9b9c67e59f2bce9a98e7ab002f69b9d7615129ba779aab7eaf942e0c"} Jan 28 15:20:33 crc kubenswrapper[4959]: I0128 15:20:33.303848 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ckgq" event={"ID":"48ead489-a40c-4d3c-a18f-83287043b523","Type":"ContainerStarted","Data":"b8a0681ecd834d25ca62d81e2ea671c8c4b87a8a4a847a0bf1fe4548eefb77ff"} Jan 28 15:20:33 crc kubenswrapper[4959]: I0128 15:20:33.343377 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8ckgq" podStartSLOduration=3.446092292 podStartE2EDuration="1m0.343352598s" podCreationTimestamp="2026-01-28 15:19:33 +0000 UTC" firstStartedPulling="2026-01-28 15:19:35.482658453 +0000 UTC m=+158.928564836" lastFinishedPulling="2026-01-28 15:20:32.379918769 +0000 UTC m=+215.825825142" observedRunningTime="2026-01-28 15:20:33.34301464 +0000 UTC m=+216.788921023" watchObservedRunningTime="2026-01-28 15:20:33.343352598 +0000 UTC m=+216.789258981" Jan 28 15:20:33 crc kubenswrapper[4959]: I0128 15:20:33.363226 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:20:34 crc kubenswrapper[4959]: I0128 15:20:34.215167 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8ckgq" Jan 28 15:20:34 crc kubenswrapper[4959]: I0128 15:20:34.215750 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8ckgq" Jan 28 15:20:35 crc kubenswrapper[4959]: I0128 15:20:35.258329 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8ckgq" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="registry-server" probeResult="failure" output=< Jan 28 15:20:35 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s Jan 28 15:20:35 crc kubenswrapper[4959]: > Jan 28 15:20:35 crc kubenswrapper[4959]: I0128 15:20:35.335119 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r74c4" event={"ID":"13b2e969-f501-4266-bcf2-76514bf739c2","Type":"ContainerStarted","Data":"904ca261e77ec984d0bd0febdad1e261345a1676961e0a286627eebd06523f6c"} Jan 28 15:20:35 crc kubenswrapper[4959]: I0128 15:20:35.364843 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r74c4" podStartSLOduration=4.8228583149999995 podStartE2EDuration="1m5.364823254s" podCreationTimestamp="2026-01-28 15:19:30 +0000 UTC" firstStartedPulling="2026-01-28 15:19:33.172370543 +0000 UTC m=+156.618276926" lastFinishedPulling="2026-01-28 15:20:33.714335482 +0000 UTC m=+217.160241865" observedRunningTime="2026-01-28 15:20:35.361408438 +0000 UTC m=+218.807314841" 
watchObservedRunningTime="2026-01-28 15:20:35.364823254 +0000 UTC m=+218.810729627" Jan 28 15:20:36 crc kubenswrapper[4959]: I0128 15:20:36.345424 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxfjz" event={"ID":"5b46d2a5-2d15-4841-97a6-b3768e4df1d4","Type":"ContainerStarted","Data":"f83f326de2c0e36942e6a6c4700ac50a0b6bb5f1b1956cf27a0f0448d9d861d6"} Jan 28 15:20:36 crc kubenswrapper[4959]: I0128 15:20:36.421726 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vd8kz"] Jan 28 15:20:36 crc kubenswrapper[4959]: I0128 15:20:36.421999 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vd8kz" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerName="registry-server" containerID="cri-o://8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0" gracePeriod=2 Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.066272 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.088452 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmgg9\" (UniqueName: \"kubernetes.io/projected/ef01ad95-eb28-48c8-9d58-3ea696164442-kube-api-access-xmgg9\") pod \"ef01ad95-eb28-48c8-9d58-3ea696164442\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.088510 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-catalog-content\") pod \"ef01ad95-eb28-48c8-9d58-3ea696164442\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.088639 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-utilities\") pod \"ef01ad95-eb28-48c8-9d58-3ea696164442\" (UID: \"ef01ad95-eb28-48c8-9d58-3ea696164442\") " Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.089723 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-utilities" (OuterVolumeSpecName: "utilities") pod "ef01ad95-eb28-48c8-9d58-3ea696164442" (UID: "ef01ad95-eb28-48c8-9d58-3ea696164442"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.101519 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef01ad95-eb28-48c8-9d58-3ea696164442-kube-api-access-xmgg9" (OuterVolumeSpecName: "kube-api-access-xmgg9") pod "ef01ad95-eb28-48c8-9d58-3ea696164442" (UID: "ef01ad95-eb28-48c8-9d58-3ea696164442"). InnerVolumeSpecName "kube-api-access-xmgg9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.190440 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.190476 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmgg9\" (UniqueName: \"kubernetes.io/projected/ef01ad95-eb28-48c8-9d58-3ea696164442-kube-api-access-xmgg9\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.353548 4959 generic.go:334] "Generic (PLEG): container finished" podID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerID="8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0" exitCode=0 Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.353621 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vd8kz" event={"ID":"ef01ad95-eb28-48c8-9d58-3ea696164442","Type":"ContainerDied","Data":"8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0"} Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.353658 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vd8kz" event={"ID":"ef01ad95-eb28-48c8-9d58-3ea696164442","Type":"ContainerDied","Data":"002afc0f535aceb8ba69648e5c883754f51d706aaae0a8a282e6413f7d4aed6a"} Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.353680 4959 scope.go:117] "RemoveContainer" containerID="8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.353838 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vd8kz" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.357124 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dz2cj" event={"ID":"288654ae-ff9c-4ab8-999a-29ca0266da2a","Type":"ContainerStarted","Data":"71c9c4e11553c331ee1292c6085d1c1b4dd933b5eb74679014a6a1966417bcf8"} Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.359550 4959 generic.go:334] "Generic (PLEG): container finished" podID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerID="f83f326de2c0e36942e6a6c4700ac50a0b6bb5f1b1956cf27a0f0448d9d861d6" exitCode=0 Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.359582 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxfjz" event={"ID":"5b46d2a5-2d15-4841-97a6-b3768e4df1d4","Type":"ContainerDied","Data":"f83f326de2c0e36942e6a6c4700ac50a0b6bb5f1b1956cf27a0f0448d9d861d6"} Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.362526 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2tkvn" event={"ID":"575b26ae-87aa-469e-9bd9-1b4384d80093","Type":"ContainerStarted","Data":"7f9c2bc05a6e5188c83d8856cfc405195ef7200e9757929685953c0ca21cd415"} Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.874385 4959 scope.go:117] "RemoveContainer" containerID="415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.908375 4959 scope.go:117] "RemoveContainer" containerID="aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.925633 4959 scope.go:117] "RemoveContainer" containerID="8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0" Jan 28 15:20:37 crc kubenswrapper[4959]: E0128 15:20:37.926208 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0\": container with ID starting with 8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0 not found: ID does not exist" containerID="8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.926264 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0"} err="failed to get container status \"8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0\": rpc error: code = NotFound desc = could not find container \"8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0\": container with ID starting with 8a14b60740269a7234542842adfe29cc6d27887c369e8fc7836f869db0e23ff0 not found: ID does not exist" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.926295 4959 scope.go:117] "RemoveContainer" containerID="415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206" Jan 28 15:20:37 crc kubenswrapper[4959]: E0128 15:20:37.926976 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206\": container with ID starting with 415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206 not found: ID does not exist" containerID="415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206" Jan 28 15:20:37 
crc kubenswrapper[4959]: I0128 15:20:37.927085 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206"} err="failed to get container status \"415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206\": rpc error: code = NotFound desc = could not find container \"415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206\": container with ID starting with 415458f1ed9a5c48c691d5c0c42d854b85d53db0774119f0dfda4fe3389f5206 not found: ID does not exist" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.927181 4959 scope.go:117] "RemoveContainer" containerID="aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37" Jan 28 15:20:37 crc kubenswrapper[4959]: E0128 15:20:37.927571 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37\": container with ID starting with aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37 not found: ID does not exist" containerID="aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37" Jan 28 15:20:37 crc kubenswrapper[4959]: I0128 15:20:37.927591 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37"} err="failed to get container status \"aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37\": rpc error: code = NotFound desc = could not find container \"aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37\": container with ID starting with aa7aa3f6c294d78c31c51219a2b9c46928089aff62d790f31462ea7ce321df37 not found: ID does not exist" Jan 28 15:20:38 crc kubenswrapper[4959]: I0128 15:20:38.279100 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ef01ad95-eb28-48c8-9d58-3ea696164442" (UID: "ef01ad95-eb28-48c8-9d58-3ea696164442"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:20:38 crc kubenswrapper[4959]: I0128 15:20:38.308487 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef01ad95-eb28-48c8-9d58-3ea696164442-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:20:38 crc kubenswrapper[4959]: I0128 15:20:38.375562 4959 generic.go:334] "Generic (PLEG): container finished" podID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerID="71c9c4e11553c331ee1292c6085d1c1b4dd933b5eb74679014a6a1966417bcf8" exitCode=0 Jan 28 15:20:38 crc kubenswrapper[4959]: I0128 15:20:38.375659 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dz2cj" event={"ID":"288654ae-ff9c-4ab8-999a-29ca0266da2a","Type":"ContainerDied","Data":"71c9c4e11553c331ee1292c6085d1c1b4dd933b5eb74679014a6a1966417bcf8"} Jan 28 15:20:38 crc kubenswrapper[4959]: I0128 15:20:38.383618 4959 generic.go:334] "Generic (PLEG): container finished" podID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerID="7f9c2bc05a6e5188c83d8856cfc405195ef7200e9757929685953c0ca21cd415" exitCode=0 Jan 28 15:20:38 crc kubenswrapper[4959]: I0128 15:20:38.383690 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2tkvn" event={"ID":"575b26ae-87aa-469e-9bd9-1b4384d80093","Type":"ContainerDied","Data":"7f9c2bc05a6e5188c83d8856cfc405195ef7200e9757929685953c0ca21cd415"} Jan 28 15:20:38 crc kubenswrapper[4959]: I0128 15:20:38.581140 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vd8kz"] Jan 28 15:20:38 crc kubenswrapper[4959]: I0128 15:20:38.604089 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vd8kz"] Jan 28 15:20:40 crc kubenswrapper[4959]: I0128 15:20:40.411517 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-nk6xq"] Jan 28 15:20:40 crc kubenswrapper[4959]: I0128 15:20:40.469692 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:20:40 crc kubenswrapper[4959]: I0128 15:20:40.469746 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:20:40 crc kubenswrapper[4959]: I0128 15:20:40.559715 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:20:40 crc kubenswrapper[4959]: I0128 15:20:40.595319 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" path="/var/lib/kubelet/pods/ef01ad95-eb28-48c8-9d58-3ea696164442/volumes" Jan 28 15:20:41 crc kubenswrapper[4959]: I0128 15:20:41.452336 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r74c4" Jan 28 15:20:42 crc kubenswrapper[4959]: I0128 15:20:42.413830 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxfjz" event={"ID":"5b46d2a5-2d15-4841-97a6-b3768e4df1d4","Type":"ContainerStarted","Data":"0a650494cac29e0dec741b654bda907d9f86f226bba06113a3dcf9e1d1d04dab"} Jan 28 15:20:42 crc kubenswrapper[4959]: I0128 15:20:42.417084 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2tkvn" 
event={"ID":"575b26ae-87aa-469e-9bd9-1b4384d80093","Type":"ContainerStarted","Data":"4485c1b986253cb9031ee7698b53bd6502153d0c492dc9861317371807ab950b"} Jan 28 15:20:42 crc kubenswrapper[4959]: I0128 15:20:42.419965 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8lcg" event={"ID":"b181a50d-3075-479c-b460-bd2addc3e6b3","Type":"ContainerStarted","Data":"23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4"} Jan 28 15:20:42 crc kubenswrapper[4959]: I0128 15:20:42.617589 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r74c4"] Jan 28 15:20:43 crc kubenswrapper[4959]: I0128 15:20:43.429382 4959 generic.go:334] "Generic (PLEG): container finished" podID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerID="23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4" exitCode=0 Jan 28 15:20:43 crc kubenswrapper[4959]: I0128 15:20:43.429525 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8lcg" event={"ID":"b181a50d-3075-479c-b460-bd2addc3e6b3","Type":"ContainerDied","Data":"23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4"} Jan 28 15:20:43 crc kubenswrapper[4959]: I0128 15:20:43.430188 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r74c4" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" containerName="registry-server" containerID="cri-o://904ca261e77ec984d0bd0febdad1e261345a1676961e0a286627eebd06523f6c" gracePeriod=2 Jan 28 15:20:43 crc kubenswrapper[4959]: I0128 15:20:43.479914 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2tkvn" podStartSLOduration=4.570602749 podStartE2EDuration="1m10.479892176s" podCreationTimestamp="2026-01-28 15:19:33 +0000 UTC" firstStartedPulling="2026-01-28 15:19:35.434152866 +0000 UTC m=+158.880059249" lastFinishedPulling="2026-01-28 15:20:41.343442293 +0000 UTC m=+224.789348676" observedRunningTime="2026-01-28 15:20:43.476538842 +0000 UTC m=+226.922445235" watchObservedRunningTime="2026-01-28 15:20:43.479892176 +0000 UTC m=+226.925798549" Jan 28 15:20:43 crc kubenswrapper[4959]: I0128 15:20:43.517976 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zxfjz" podStartSLOduration=6.015776921 podStartE2EDuration="1m14.517949928s" podCreationTimestamp="2026-01-28 15:19:29 +0000 UTC" firstStartedPulling="2026-01-28 15:19:33.209227824 +0000 UTC m=+156.655134207" lastFinishedPulling="2026-01-28 15:20:41.711400831 +0000 UTC m=+225.157307214" observedRunningTime="2026-01-28 15:20:43.51003036 +0000 UTC m=+226.955936733" watchObservedRunningTime="2026-01-28 15:20:43.517949928 +0000 UTC m=+226.963856321" Jan 28 15:20:43 crc kubenswrapper[4959]: I0128 15:20:43.655467 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2tkvn" Jan 28 15:20:43 crc kubenswrapper[4959]: I0128 15:20:43.656969 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2tkvn" Jan 28 15:20:44 crc kubenswrapper[4959]: I0128 15:20:44.257960 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8ckgq" Jan 28 15:20:44 crc kubenswrapper[4959]: I0128 15:20:44.311627 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:20:44 crc kubenswrapper[4959]: I0128 15:20:44.439947 4959 generic.go:334] "Generic (PLEG): container finished" podID="13b2e969-f501-4266-bcf2-76514bf739c2" containerID="904ca261e77ec984d0bd0febdad1e261345a1676961e0a286627eebd06523f6c" exitCode=0
Jan 28 15:20:44 crc kubenswrapper[4959]: I0128 15:20:44.440034 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r74c4" event={"ID":"13b2e969-f501-4266-bcf2-76514bf739c2","Type":"ContainerDied","Data":"904ca261e77ec984d0bd0febdad1e261345a1676961e0a286627eebd06523f6c"}
Jan 28 15:20:44 crc kubenswrapper[4959]: I0128 15:20:44.699024 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2tkvn" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="registry-server" probeResult="failure" output=<
Jan 28 15:20:44 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s
Jan 28 15:20:44 crc kubenswrapper[4959]: >
Jan 28 15:20:44 crc kubenswrapper[4959]: I0128 15:20:44.819263 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8ckgq"]
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.449668 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8ckgq" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="registry-server" containerID="cri-o://b8a0681ecd834d25ca62d81e2ea671c8c4b87a8a4a847a0bf1fe4548eefb77ff" gracePeriod=2
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.450192 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dz2cj" event={"ID":"288654ae-ff9c-4ab8-999a-29ca0266da2a","Type":"ContainerStarted","Data":"ab4ab4e6656581d7f764888cb2a806946cd194612e2d26761964a58f67dee94f"}
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.768974 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r74c4"
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.835377 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-catalog-content\") pod \"13b2e969-f501-4266-bcf2-76514bf739c2\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") "
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.835651 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-utilities\") pod \"13b2e969-f501-4266-bcf2-76514bf739c2\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") "
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.835705 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6549\" (UniqueName: \"kubernetes.io/projected/13b2e969-f501-4266-bcf2-76514bf739c2-kube-api-access-j6549\") pod \"13b2e969-f501-4266-bcf2-76514bf739c2\" (UID: \"13b2e969-f501-4266-bcf2-76514bf739c2\") "
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.837694 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-utilities" (OuterVolumeSpecName: "utilities") pod "13b2e969-f501-4266-bcf2-76514bf739c2" (UID: "13b2e969-f501-4266-bcf2-76514bf739c2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.857384 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13b2e969-f501-4266-bcf2-76514bf739c2-kube-api-access-j6549" (OuterVolumeSpecName: "kube-api-access-j6549") pod "13b2e969-f501-4266-bcf2-76514bf739c2" (UID: "13b2e969-f501-4266-bcf2-76514bf739c2"). InnerVolumeSpecName "kube-api-access-j6549". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.889748 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13b2e969-f501-4266-bcf2-76514bf739c2" (UID: "13b2e969-f501-4266-bcf2-76514bf739c2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.937947 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.937991 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6549\" (UniqueName: \"kubernetes.io/projected/13b2e969-f501-4266-bcf2-76514bf739c2-kube-api-access-j6549\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:45 crc kubenswrapper[4959]: I0128 15:20:45.938006 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13b2e969-f501-4266-bcf2-76514bf739c2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.459556 4959 generic.go:334] "Generic (PLEG): container finished" podID="48ead489-a40c-4d3c-a18f-83287043b523" containerID="b8a0681ecd834d25ca62d81e2ea671c8c4b87a8a4a847a0bf1fe4548eefb77ff" exitCode=0
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.459626 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ckgq" event={"ID":"48ead489-a40c-4d3c-a18f-83287043b523","Type":"ContainerDied","Data":"b8a0681ecd834d25ca62d81e2ea671c8c4b87a8a4a847a0bf1fe4548eefb77ff"}
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.462370 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r74c4" event={"ID":"13b2e969-f501-4266-bcf2-76514bf739c2","Type":"ContainerDied","Data":"b0f89077218dd11a009f55de013d06d828073b7217832db143e71e320ac405e3"}
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.462429 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r74c4"
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.462465 4959 scope.go:117] "RemoveContainer" containerID="904ca261e77ec984d0bd0febdad1e261345a1676961e0a286627eebd06523f6c"
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.497497 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dz2cj" podStartSLOduration=6.883925913 podStartE2EDuration="1m16.497475388s" podCreationTimestamp="2026-01-28 15:19:30 +0000 UTC" firstStartedPulling="2026-01-28 15:19:34.437608616 +0000 UTC m=+157.883514999" lastFinishedPulling="2026-01-28 15:20:44.051158091 +0000 UTC m=+227.497064474" observedRunningTime="2026-01-28 15:20:46.494020212 +0000 UTC m=+229.939926605" watchObservedRunningTime="2026-01-28 15:20:46.497475388 +0000 UTC m=+229.943381771"
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.512062 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r74c4"]
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.515856 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r74c4"]
Jan 28 15:20:46 crc kubenswrapper[4959]: I0128 15:20:46.595199 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" path="/var/lib/kubelet/pods/13b2e969-f501-4266-bcf2-76514bf739c2/volumes"
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.627139 4959 scope.go:117] "RemoveContainer" containerID="0b6d570b9b9c67e59f2bce9a98e7ab002f69b9d7615129ba779aab7eaf942e0c"
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.706640 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.767271 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cknrm\" (UniqueName: \"kubernetes.io/projected/48ead489-a40c-4d3c-a18f-83287043b523-kube-api-access-cknrm\") pod \"48ead489-a40c-4d3c-a18f-83287043b523\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") "
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.767360 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-catalog-content\") pod \"48ead489-a40c-4d3c-a18f-83287043b523\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") "
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.767424 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-utilities\") pod \"48ead489-a40c-4d3c-a18f-83287043b523\" (UID: \"48ead489-a40c-4d3c-a18f-83287043b523\") "
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.768641 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-utilities" (OuterVolumeSpecName: "utilities") pod "48ead489-a40c-4d3c-a18f-83287043b523" (UID: "48ead489-a40c-4d3c-a18f-83287043b523"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.783601 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48ead489-a40c-4d3c-a18f-83287043b523-kube-api-access-cknrm" (OuterVolumeSpecName: "kube-api-access-cknrm") pod "48ead489-a40c-4d3c-a18f-83287043b523" (UID: "48ead489-a40c-4d3c-a18f-83287043b523"). InnerVolumeSpecName "kube-api-access-cknrm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.869230 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cknrm\" (UniqueName: \"kubernetes.io/projected/48ead489-a40c-4d3c-a18f-83287043b523-kube-api-access-cknrm\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.869274 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.893298 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "48ead489-a40c-4d3c-a18f-83287043b523" (UID: "48ead489-a40c-4d3c-a18f-83287043b523"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.958840 4959 scope.go:117] "RemoveContainer" containerID="8280103119d2cc8c2cf0e53e17a0d414f68c4cfffaf4db25abe3f8efd81035a5"
Jan 28 15:20:47 crc kubenswrapper[4959]: I0128 15:20:47.970545 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48ead489-a40c-4d3c-a18f-83287043b523-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:48 crc kubenswrapper[4959]: I0128 15:20:48.476745 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8ckgq"
Jan 28 15:20:48 crc kubenswrapper[4959]: I0128 15:20:48.476762 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ckgq" event={"ID":"48ead489-a40c-4d3c-a18f-83287043b523","Type":"ContainerDied","Data":"ce83d40c49189155a45b985bd04589535d94203aabf2464e7f8741537632784d"}
Jan 28 15:20:48 crc kubenswrapper[4959]: I0128 15:20:48.504827 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8ckgq"]
Jan 28 15:20:48 crc kubenswrapper[4959]: I0128 15:20:48.509679 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8ckgq"]
Jan 28 15:20:48 crc kubenswrapper[4959]: I0128 15:20:48.597142 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48ead489-a40c-4d3c-a18f-83287043b523" path="/var/lib/kubelet/pods/48ead489-a40c-4d3c-a18f-83287043b523/volumes"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.077285 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zxfjz"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.077351 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zxfjz"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.129790 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zxfjz"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.544378 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zxfjz"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.555843 4959 scope.go:117] "RemoveContainer" containerID="b8a0681ecd834d25ca62d81e2ea671c8c4b87a8a4a847a0bf1fe4548eefb77ff"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.915735 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.915828 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.920919 4959 scope.go:117] "RemoveContainer" containerID="aba459eb1a3a6dec43921aa5699b94b18bd80f12ddb33db29e15254cbc55b54b"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.962645 4959 scope.go:117] "RemoveContainer" containerID="4b8e2a29a96c5ae77fe918e0dbd3536272582715ee2de5526b685ee57a1cfa63"
Jan 28 15:20:50 crc kubenswrapper[4959]: I0128 15:20:50.979090 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.436736 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c99685-nm45j"]
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.437066 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" podUID="0752de24-139e-4646-b96b-c8fa75e3ac6f" containerName="controller-manager" containerID="cri-o://3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1" gracePeriod=30
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.507269 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8lcg" event={"ID":"b181a50d-3075-479c-b460-bd2addc3e6b3","Type":"ContainerStarted","Data":"3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74"}
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.509741 4959 generic.go:334] "Generic (PLEG): container finished" podID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerID="d4e941319bb58e5ecd05b03273dae04808307e37ac6a4bb3860643a2adc18d77" exitCode=0
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.509815 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dkw87" event={"ID":"9058c198-cfe2-496a-b045-d3650a0a36bf","Type":"ContainerDied","Data":"d4e941319bb58e5ecd05b03273dae04808307e37ac6a4bb3860643a2adc18d77"}
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.534433 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z8lcg" podStartSLOduration=4.925480757 podStartE2EDuration="1m21.534408452s" podCreationTimestamp="2026-01-28 15:19:30 +0000 UTC" firstStartedPulling="2026-01-28 15:19:34.312614251 +0000 UTC m=+157.758520634" lastFinishedPulling="2026-01-28 15:20:50.921541946 +0000 UTC m=+234.367448329" observedRunningTime="2026-01-28 15:20:51.534166916 +0000 UTC m=+234.980073329" watchObservedRunningTime="2026-01-28 15:20:51.534408452 +0000 UTC m=+234.980314835"
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.546415 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8"]
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.546726 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" podUID="4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" containerName="route-controller-manager" containerID="cri-o://0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3" gracePeriod=30
Jan 28 15:20:51 crc kubenswrapper[4959]: I0128 15:20:51.587271 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dz2cj"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.158504 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.164413 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246209 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plxqk\" (UniqueName: \"kubernetes.io/projected/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-kube-api-access-plxqk\") pod \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246323 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-client-ca\") pod \"0752de24-139e-4646-b96b-c8fa75e3ac6f\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246344 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-proxy-ca-bundles\") pod \"0752de24-139e-4646-b96b-c8fa75e3ac6f\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246431 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-client-ca\") pod \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246455 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-config\") pod \"0752de24-139e-4646-b96b-c8fa75e3ac6f\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246477 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lscfk\" (UniqueName: \"kubernetes.io/projected/0752de24-139e-4646-b96b-c8fa75e3ac6f-kube-api-access-lscfk\") pod \"0752de24-139e-4646-b96b-c8fa75e3ac6f\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246509 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0752de24-139e-4646-b96b-c8fa75e3ac6f-serving-cert\") pod \"0752de24-139e-4646-b96b-c8fa75e3ac6f\" (UID: \"0752de24-139e-4646-b96b-c8fa75e3ac6f\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246540 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-serving-cert\") pod \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.246591 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-config\") pod \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\" (UID: \"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807\") "
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.247592 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-client-ca" (OuterVolumeSpecName: "client-ca") pod "0752de24-139e-4646-b96b-c8fa75e3ac6f" (UID: "0752de24-139e-4646-b96b-c8fa75e3ac6f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.247701 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "0752de24-139e-4646-b96b-c8fa75e3ac6f" (UID: "0752de24-139e-4646-b96b-c8fa75e3ac6f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.248308 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-config" (OuterVolumeSpecName: "config") pod "0752de24-139e-4646-b96b-c8fa75e3ac6f" (UID: "0752de24-139e-4646-b96b-c8fa75e3ac6f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.248839 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-client-ca" (OuterVolumeSpecName: "client-ca") pod "4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" (UID: "4fa86a3a-4d4e-4d68-88a2-7cf59cad2807"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.248938 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-config" (OuterVolumeSpecName: "config") pod "4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" (UID: "4fa86a3a-4d4e-4d68-88a2-7cf59cad2807"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.257804 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0752de24-139e-4646-b96b-c8fa75e3ac6f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0752de24-139e-4646-b96b-c8fa75e3ac6f" (UID: "0752de24-139e-4646-b96b-c8fa75e3ac6f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.257804 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-kube-api-access-plxqk" (OuterVolumeSpecName: "kube-api-access-plxqk") pod "4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" (UID: "4fa86a3a-4d4e-4d68-88a2-7cf59cad2807"). InnerVolumeSpecName "kube-api-access-plxqk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.257958 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0752de24-139e-4646-b96b-c8fa75e3ac6f-kube-api-access-lscfk" (OuterVolumeSpecName: "kube-api-access-lscfk") pod "0752de24-139e-4646-b96b-c8fa75e3ac6f" (UID: "0752de24-139e-4646-b96b-c8fa75e3ac6f"). InnerVolumeSpecName "kube-api-access-lscfk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.258728 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" (UID: "4fa86a3a-4d4e-4d68-88a2-7cf59cad2807"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348595 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348640 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plxqk\" (UniqueName: \"kubernetes.io/projected/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-kube-api-access-plxqk\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348660 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-client-ca\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348673 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348685 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0752de24-139e-4646-b96b-c8fa75e3ac6f-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348698 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-client-ca\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348712 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lscfk\" (UniqueName: \"kubernetes.io/projected/0752de24-139e-4646-b96b-c8fa75e3ac6f-kube-api-access-lscfk\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348723 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0752de24-139e-4646-b96b-c8fa75e3ac6f-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.348737 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.517902 4959 generic.go:334] "Generic (PLEG): container finished" podID="4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" containerID="0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3" exitCode=0
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.517975 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" event={"ID":"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807","Type":"ContainerDied","Data":"0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3"}
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.518010 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8" event={"ID":"4fa86a3a-4d4e-4d68-88a2-7cf59cad2807","Type":"ContainerDied","Data":"1bdc6756eb444bfca77c9c33e9404b82dcb8db1008e7a4ca10fd557e1eed3405"}
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.518030 4959 scope.go:117] "RemoveContainer" containerID="0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.518131 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.525036 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dkw87" event={"ID":"9058c198-cfe2-496a-b045-d3650a0a36bf","Type":"ContainerStarted","Data":"23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e"}
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.528999 4959 generic.go:334] "Generic (PLEG): container finished" podID="0752de24-139e-4646-b96b-c8fa75e3ac6f" containerID="3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1" exitCode=0
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.535361 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" event={"ID":"0752de24-139e-4646-b96b-c8fa75e3ac6f","Type":"ContainerDied","Data":"3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1"}
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.535437 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j" event={"ID":"0752de24-139e-4646-b96b-c8fa75e3ac6f","Type":"ContainerDied","Data":"660d7c15469bd5f71a59488bb08cdbd68a49499bf236fc43a0841db0060be704"}
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.535476 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-78d4c99685-nm45j"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.558162 4959 scope.go:117] "RemoveContainer" containerID="0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3"
Jan 28 15:20:52 crc kubenswrapper[4959]: E0128 15:20:52.559383 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3\": container with ID starting with 0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3 not found: ID does not exist" containerID="0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.559434 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3"} err="failed to get container status \"0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3\": rpc error: code = NotFound desc = could not find container \"0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3\": container with ID starting with 0db8c253d218d7d9e15dbfb5ab15fcc9cfeec5702e81ea235200c797e58a0cc3 not found: ID does not exist"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.559463 4959 scope.go:117] "RemoveContainer" containerID="3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.565051 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dkw87" podStartSLOduration=3.8858981569999997 podStartE2EDuration="1m21.564982231s" podCreationTimestamp="2026-01-28 15:19:31 +0000 UTC" firstStartedPulling="2026-01-28 15:19:34.309649717 +0000 UTC m=+157.755556100" lastFinishedPulling="2026-01-28 15:20:51.988733791 +0000 UTC m=+235.434640174" observedRunningTime="2026-01-28 15:20:52.555930755 +0000 UTC m=+236.001837148" watchObservedRunningTime="2026-01-28 15:20:52.564982231 +0000 UTC m=+236.010888624"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.575858 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8"]
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.583282 4959 scope.go:117] "RemoveContainer" containerID="3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1"
Jan 28 15:20:52 crc kubenswrapper[4959]: E0128 15:20:52.583953 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1\": container with ID starting with 3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1 not found: ID does not exist" containerID="3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.584020 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1"} err="failed to get container status \"3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1\": rpc error: code = NotFound desc = could not find container \"3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1\": container with ID starting with 3970cfd1cc9e4b1c06b570156a801dd23bc3569fc2a6c2b990a16152ed52ebf1 not found: ID does not exist"
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.600840 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7db9b7df74-x5fm8"]
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.600889 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c99685-nm45j"]
Jan 28 15:20:52 crc kubenswrapper[4959]: I0128 15:20:52.603256 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c99685-nm45j"]
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137600 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5dd877f547-2kjht"]
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.137867 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0752de24-139e-4646-b96b-c8fa75e3ac6f" containerName="controller-manager"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137884 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="0752de24-139e-4646-b96b-c8fa75e3ac6f" containerName="controller-manager"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.137903 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" containerName="extract-content"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137910 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" containerName="extract-content"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.137918 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137926 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.137936 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerName="extract-utilities"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137942 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerName="extract-utilities"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.137949 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="extract-utilities"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137955 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="extract-utilities"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.137965 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" containerName="extract-utilities"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137970 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" containerName="extract-utilities"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.137977 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137983 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.137992 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" containerName="route-controller-manager"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.137999 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" containerName="route-controller-manager"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.138008 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138015 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.138026 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="extract-content"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138033 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="extract-content"
Jan 28 15:20:53 crc kubenswrapper[4959]: E0128 15:20:53.138042 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerName="extract-content"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138048 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerName="extract-content"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138158 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef01ad95-eb28-48c8-9d58-3ea696164442" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138170 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="48ead489-a40c-4d3c-a18f-83287043b523" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138176 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="0752de24-139e-4646-b96b-c8fa75e3ac6f" containerName="controller-manager"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138183 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" containerName="route-controller-manager"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138193 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="13b2e969-f501-4266-bcf2-76514bf739c2" containerName="registry-server"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.138580 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.142019 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.142190 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.142441 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.142634 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.144525 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.144762 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"]
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.145270 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.146270 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.150090 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.154150 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.154237 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.158421 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5dd877f547-2kjht"]
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.158989 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.161370 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mdwd\" (UniqueName: \"kubernetes.io/projected/1d848d1b-446a-42bb-a789-5df54f8217aa-kube-api-access-9mdwd\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.161425 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-client-ca\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.161459 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgtvq\" (UniqueName: \"kubernetes.io/projected/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-kube-api-access-mgtvq\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.161491 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-config\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.161525 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d848d1b-446a-42bb-a789-5df54f8217aa-serving-cert\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.165442 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-serving-cert\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.165498 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-proxy-ca-bundles\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.165559 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.165607 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-config\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.165645 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-client-ca\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.165669 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.167044 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.181606 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"]
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.266887 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mdwd\" (UniqueName: \"kubernetes.io/projected/1d848d1b-446a-42bb-a789-5df54f8217aa-kube-api-access-9mdwd\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.266963 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-client-ca\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.266986 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgtvq\" (UniqueName: \"kubernetes.io/projected/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-kube-api-access-mgtvq\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.267031 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-config\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.267060 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d848d1b-446a-42bb-a789-5df54f8217aa-serving-cert\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.267137 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-serving-cert\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.267155 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-proxy-ca-bundles\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.267215 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-config\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.267264 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-client-ca\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.268352 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-client-ca\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.268655 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-client-ca\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.268698 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-config\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.268991 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-config\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.269456 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-proxy-ca-bundles\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.273931 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-serving-cert\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.273957 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d848d1b-446a-42bb-a789-5df54f8217aa-serving-cert\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.289497 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgtvq\" (UniqueName: \"kubernetes.io/projected/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-kube-api-access-mgtvq\") pod \"controller-manager-5dd877f547-2kjht\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.294368 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mdwd\" (UniqueName: \"kubernetes.io/projected/1d848d1b-446a-42bb-a789-5df54f8217aa-kube-api-access-9mdwd\") pod \"route-controller-manager-7dd59f5db4-548wc\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.461483 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.468238 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.715051 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.767278 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2tkvn"
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.945838 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"]
Jan 28 15:20:53 crc kubenswrapper[4959]: W0128 15:20:53.953989 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d848d1b_446a_42bb_a789_5df54f8217aa.slice/crio-b253251638ab156ace42b68741d30230565e958908e7e6c0f9bbd7ca6cc0b888 WatchSource:0}: Error finding container b253251638ab156ace42b68741d30230565e958908e7e6c0f9bbd7ca6cc0b888: Status 404 returned error can't find the container with id b253251638ab156ace42b68741d30230565e958908e7e6c0f9bbd7ca6cc0b888
Jan 28 15:20:53 crc kubenswrapper[4959]: I0128 15:20:53.986805 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5dd877f547-2kjht"]
Jan 28 15:20:53 crc kubenswrapper[4959]: W0128 15:20:53.998527 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b3091fe_30cf_44cb_99b5_81f32e4f10d4.slice/crio-b1a43916c0b64f2726b95bf57da5c2ec071054885d7efe9b0e6cea03a7ab0113 WatchSource:0}: Error finding container b1a43916c0b64f2726b95bf57da5c2ec071054885d7efe9b0e6cea03a7ab0113: Status 404 returned error can't find the container with id b1a43916c0b64f2726b95bf57da5c2ec071054885d7efe9b0e6cea03a7ab0113
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.596199 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0752de24-139e-4646-b96b-c8fa75e3ac6f" path="/var/lib/kubelet/pods/0752de24-139e-4646-b96b-c8fa75e3ac6f/volumes"
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.597500 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fa86a3a-4d4e-4d68-88a2-7cf59cad2807" path="/var/lib/kubelet/pods/4fa86a3a-4d4e-4d68-88a2-7cf59cad2807/volumes"
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.598013 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht" event={"ID":"1b3091fe-30cf-44cb-99b5-81f32e4f10d4","Type":"ContainerStarted","Data":"91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6"}
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.598047 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht" event={"ID":"1b3091fe-30cf-44cb-99b5-81f32e4f10d4","Type":"ContainerStarted","Data":"b1a43916c0b64f2726b95bf57da5c2ec071054885d7efe9b0e6cea03a7ab0113"}
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.598065 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.598396 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc" event={"ID":"1d848d1b-446a-42bb-a789-5df54f8217aa","Type":"ContainerStarted","Data":"3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5"}
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.598427 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc" event={"ID":"1d848d1b-446a-42bb-a789-5df54f8217aa","Type":"ContainerStarted","Data":"b253251638ab156ace42b68741d30230565e958908e7e6c0f9bbd7ca6cc0b888"}
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.606188 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht"
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.620147 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht" podStartSLOduration=3.620082718 podStartE2EDuration="3.620082718s" podCreationTimestamp="2026-01-28 15:20:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:54.616995421 +0000 UTC m=+238.062901824" watchObservedRunningTime="2026-01-28 15:20:54.620082718 +0000 UTC m=+238.065989101"
Jan 28 15:20:54 crc kubenswrapper[4959]: I0128 15:20:54.651615 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc" podStartSLOduration=3.651570976 podStartE2EDuration="3.651570976s" podCreationTimestamp="2026-01-28 15:20:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:20:54.650771286 +0000 UTC m=+238.096677679" watchObservedRunningTime="2026-01-28 15:20:54.651570976 +0000 UTC m=+238.097477369"
Jan 28 15:20:55 crc kubenswrapper[4959]: I0128 15:20:55.606725 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:55 crc kubenswrapper[4959]: I0128 15:20:55.612640 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.202156 4959 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.203435 4959 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.203713 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98" gracePeriod=15
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.203840 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.203956 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e" gracePeriod=15
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.204029 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e" gracePeriod=15
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.204083 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881" gracePeriod=15
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.204177 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e" gracePeriod=15
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.204934 4959 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 28 15:20:59 crc kubenswrapper[4959]: E0128 15:20:59.205425 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205440 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 28 15:20:59 crc kubenswrapper[4959]: E0128 15:20:59.205455 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205462 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 28 15:20:59 crc kubenswrapper[4959]: E0128 15:20:59.205472 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205479 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 28 15:20:59 crc kubenswrapper[4959]: E0128 15:20:59.205488 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205493 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 28 15:20:59 crc kubenswrapper[4959]: E0128 15:20:59.205510 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205516 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 15:20:59 crc kubenswrapper[4959]: E0128 15:20:59.205523 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205530 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 15:20:59 crc kubenswrapper[4959]: E0128 15:20:59.205543 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205549 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205642 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205652 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205663 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205674 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205682 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.205689 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.361011 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.361081 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.361143 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.361172 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.361406 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.361460 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.361547 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.361593 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463371 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463499 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463530 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463589 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID:
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463617 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463656 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463659 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463719 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463761 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463778 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463810 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463601 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463857 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 
15:20:59.463908 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463944 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.463909 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.958774 4959 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 28 15:20:59 crc kubenswrapper[4959]: I0128 15:20:59.958857 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.122684 4959 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.122765 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.590495 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.590890 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:21:00Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:21:00Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:21:00Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:21:00Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:80389051bc0ea34449a3ee9b5472446041cb0f2e47fa9d2048010428fa1019ba\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:e9a0654b0e53f31c6f63037d06bc5145dc7b9c46a7ac2d778d473d966efb9e14\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1675675872},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:68c28a690c4c3482a63d6de9cf3b80304e983243444eb4d2c5fcaf5c051eb54b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:a273081c72178c20c79eca9b18dbb926d33a6bb826b215c14de6b31207e497ca\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1202349806},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:364f5956de22b63db7dad4fcdd1f2740f71a482026c15aa3e2abebfbc5bf2fd7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:d3d262f90dd0f3c3f809b45f327ca086741a47f73e44560b04787609f0f99567\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1187310829},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:420326d8488ceff2cde22ad8b85d739b0c254d47e703f7ddb1f08f77a48816a6\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:54817da328fa589491a3acbe80acdd88c0830dcc63aaafc08c3539925a1a3b03\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1180692192},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\
\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.591238 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.591461 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.591698 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.592012 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.592054 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.627911 4959 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.628555 4959 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.629024 4959 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.629357 4959 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.629705 4959 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.629925 4959 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.630409 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="200ms" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.637554 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.638583 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.639427 4959 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e" exitCode=2 Jan 28 15:21:00 crc kubenswrapper[4959]: E0128 15:21:00.832160 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="400ms" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.886221 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z8lcg" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.886292 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z8lcg" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.926527 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z8lcg" Jan 28 15:21:00 crc kubenswrapper[4959]: I0128 15:21:00.927406 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: E0128 15:21:01.233210 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="800ms" Jan 28 15:21:01 crc kubenswrapper[4959]: E0128 15:21:01.595514 4959 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.107:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" volumeName="registry-storage" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.646832 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.648318 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.648667 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.649541 4959 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e" exitCode=0 Jan 28 15:21:01 crc 
kubenswrapper[4959]: I0128 15:21:01.649601 4959 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e" exitCode=0 Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.649614 4959 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881" exitCode=0 Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.649627 4959 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98" exitCode=0 Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.649675 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b910c1d48fa6f4eb31400571b74c3a33c873cc79bb515cc0227cd87e2a962f94" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.649613 4959 scope.go:117] "RemoveContainer" containerID="9e32d471f4791cd159cb2c298701c4eb762332e3fbd4eb18d9302af0a8a6cc74" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.650004 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.651955 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.652089 4959 generic.go:334] "Generic (PLEG): container finished" podID="beb38771-a14f-473a-9e2b-c9585abb58dc" containerID="6217bd860b9ca7c69120c91eae5ffde7737f795749b394ecbf501ffd46dade00" exitCode=0 Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.652154 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"beb38771-a14f-473a-9e2b-c9585abb58dc","Type":"ContainerDied","Data":"6217bd860b9ca7c69120c91eae5ffde7737f795749b394ecbf501ffd46dade00"} Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.652731 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.653094 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.653595 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.653817 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.654127 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.706455 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z8lcg" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.707367 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.707757 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.708258 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722257 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722395 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722443 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722510 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722599 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722624 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722827 4959 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722857 4959 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:01 crc kubenswrapper[4959]: I0128 15:21:01.722868 4959 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:02 crc kubenswrapper[4959]: E0128 15:21:02.034837 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="1.6s" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.320455 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dkw87" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.320518 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dkw87" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.361545 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dkw87" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.361987 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.362269 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.362463 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.362694 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.594559 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.662431 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.663847 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.665872 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.666088 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.666360 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.666515 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.673351 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.673709 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: 
connect: connection refused"
Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.674007 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.674459 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.717123 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dkw87"
Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.718220 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.718560 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.719033 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:02 crc kubenswrapper[4959]: I0128 15:21:02.719489 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.043176 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.044296 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.044940 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.045676 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.045976 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused"
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.142743 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-kubelet-dir\") pod \"beb38771-a14f-473a-9e2b-c9585abb58dc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") "
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.142809 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-var-lock\") pod \"beb38771-a14f-473a-9e2b-c9585abb58dc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") "
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.142857 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/beb38771-a14f-473a-9e2b-c9585abb58dc-kube-api-access\") pod \"beb38771-a14f-473a-9e2b-c9585abb58dc\" (UID: \"beb38771-a14f-473a-9e2b-c9585abb58dc\") "
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.142905 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "beb38771-a14f-473a-9e2b-c9585abb58dc" (UID: "beb38771-a14f-473a-9e2b-c9585abb58dc"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.142953 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-var-lock" (OuterVolumeSpecName: "var-lock") pod "beb38771-a14f-473a-9e2b-c9585abb58dc" (UID: "beb38771-a14f-473a-9e2b-c9585abb58dc"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.143269 4959 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.143289 4959 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/beb38771-a14f-473a-9e2b-c9585abb58dc-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.150588 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/beb38771-a14f-473a-9e2b-c9585abb58dc-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "beb38771-a14f-473a-9e2b-c9585abb58dc" (UID: "beb38771-a14f-473a-9e2b-c9585abb58dc"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.245164 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/beb38771-a14f-473a-9e2b-c9585abb58dc-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:03 crc kubenswrapper[4959]: E0128 15:21:03.635920 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="3.2s" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.669202 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"beb38771-a14f-473a-9e2b-c9585abb58dc","Type":"ContainerDied","Data":"0de0ed828c41581e21508149e51763a75bddb9d017e55b5fa3f829ecf7b1c53f"} Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.669253 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0de0ed828c41581e21508149e51763a75bddb9d017e55b5fa3f829ecf7b1c53f" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.669267 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.682071 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.682459 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.683184 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:03 crc kubenswrapper[4959]: I0128 15:21:03.683995 4959 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:04 crc kubenswrapper[4959]: E0128 15:21:04.241578 4959 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.107:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:21:04 crc kubenswrapper[4959]: I0128 15:21:04.242371 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:21:04 crc kubenswrapper[4959]: W0128 15:21:04.264910 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-4f89f8e82a69b80b5386f2068144053c21cf5f767c8c1a4d74fbb5af3cd57178 WatchSource:0}: Error finding container 4f89f8e82a69b80b5386f2068144053c21cf5f767c8c1a4d74fbb5af3cd57178: Status 404 returned error can't find the container with id 4f89f8e82a69b80b5386f2068144053c21cf5f767c8c1a4d74fbb5af3cd57178 Jan 28 15:21:04 crc kubenswrapper[4959]: E0128 15:21:04.267832 4959 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.107:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188eee407853f67f openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 15:21:04.267269759 +0000 UTC m=+247.713176142,LastTimestamp:2026-01-28 15:21:04.267269759 +0000 UTC m=+247.713176142,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 15:21:04 crc kubenswrapper[4959]: I0128 15:21:04.680405 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622"} Jan 28 15:21:04 crc kubenswrapper[4959]: I0128 15:21:04.680508 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"4f89f8e82a69b80b5386f2068144053c21cf5f767c8c1a4d74fbb5af3cd57178"} Jan 28 15:21:04 crc kubenswrapper[4959]: E0128 15:21:04.681740 4959 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.107:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:21:04 crc kubenswrapper[4959]: I0128 15:21:04.682094 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:04 crc kubenswrapper[4959]: I0128 15:21:04.682558 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:04 crc kubenswrapper[4959]: I0128 15:21:04.683075 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:04 crc kubenswrapper[4959]: E0128 15:21:04.718047 4959 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.107:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188eee407853f67f openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 15:21:04.267269759 +0000 UTC m=+247.713176142,LastTimestamp:2026-01-28 15:21:04.267269759 +0000 UTC m=+247.713176142,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.451643 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" containerName="oauth-openshift" containerID="cri-o://e754bf65fa3e302711fcc3cc917c91511b61384f2e777ba0c4b23ef1c80e4689" gracePeriod=15 Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.688452 4959 generic.go:334] "Generic (PLEG): container finished" podID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" containerID="e754bf65fa3e302711fcc3cc917c91511b61384f2e777ba0c4b23ef1c80e4689" exitCode=0 Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.688498 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" event={"ID":"29b47bb2-f090-43a4-b2ea-7bb83b683efb","Type":"ContainerDied","Data":"e754bf65fa3e302711fcc3cc917c91511b61384f2e777ba0c4b23ef1c80e4689"} Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.933274 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.934469 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.934731 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.934969 4959 status_manager.go:851] "Failed to get status for pod" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-nk6xq\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.935209 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991207 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-cliconfig\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991281 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-serving-cert\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991320 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-login\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991344 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-ocp-branding-template\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991368 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-session\") pod 
\"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991398 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-trusted-ca-bundle\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991421 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-policies\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991437 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-idp-0-file-data\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991467 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-service-ca\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991495 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-provider-selection\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991520 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-router-certs\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991539 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-dir\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991585 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-error\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.991624 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ncb9\" (UniqueName: \"kubernetes.io/projected/29b47bb2-f090-43a4-b2ea-7bb83b683efb-kube-api-access-8ncb9\") pod \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\" (UID: \"29b47bb2-f090-43a4-b2ea-7bb83b683efb\") " Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 
15:21:05.992625 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.993044 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.993095 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.993321 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:21:05 crc kubenswrapper[4959]: I0128 15:21:05.993344 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.001343 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.001774 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29b47bb2-f090-43a4-b2ea-7bb83b683efb-kube-api-access-8ncb9" (OuterVolumeSpecName: "kube-api-access-8ncb9") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "kube-api-access-8ncb9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.002274 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.005628 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.008495 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.008622 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.009647 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.011401 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.011674 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "29b47bb2-f090-43a4-b2ea-7bb83b683efb" (UID: "29b47bb2-f090-43a4-b2ea-7bb83b683efb"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092673 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092727 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ncb9\" (UniqueName: \"kubernetes.io/projected/29b47bb2-f090-43a4-b2ea-7bb83b683efb-kube-api-access-8ncb9\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092745 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092757 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092768 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092781 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092794 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092808 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092819 4959 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092830 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092841 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092852 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092868 4959 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/29b47bb2-f090-43a4-b2ea-7bb83b683efb-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.092880 4959 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/29b47bb2-f090-43a4-b2ea-7bb83b683efb-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.695743 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" event={"ID":"29b47bb2-f090-43a4-b2ea-7bb83b683efb","Type":"ContainerDied","Data":"60250782405c8849376c3fa6d599aed15dcabc0e342ead76d8044cbbc2319135"} Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.695808 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.695825 4959 scope.go:117] "RemoveContainer" containerID="e754bf65fa3e302711fcc3cc917c91511b61384f2e777ba0c4b23ef1c80e4689" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.696555 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.696792 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.697057 4959 status_manager.go:851] "Failed to get status for pod" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-nk6xq\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.697347 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.701941 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.702439 4959 status_manager.go:851] "Failed to get status for pod" 
podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.703264 4959 status_manager.go:851] "Failed to get status for pod" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-nk6xq\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:06 crc kubenswrapper[4959]: I0128 15:21:06.703557 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:06 crc kubenswrapper[4959]: E0128 15:21:06.836883 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" interval="6.4s" Jan 28 15:21:10 crc kubenswrapper[4959]: I0128 15:21:10.591885 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: I0128 15:21:10.593695 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: I0128 15:21:10.595054 4959 status_manager.go:851] "Failed to get status for pod" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-nk6xq\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: I0128 15:21:10.595407 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: E0128 15:21:10.934936 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:21:10Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:21:10Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:21:10Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T15:21:10Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:80389051bc0ea34449a3ee9b5472446041cb0f2e47fa9d2048010428fa1019ba\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:e9a0654b0e53f31c6f63037d06bc5145dc7b9c46a7ac2d778d473d966efb9e14\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1675675872},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:68c28a690c4c3482a63d6de9cf3b80304e983243444eb4d2c5fcaf5c051eb54b\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:a273081c72178c20c79eca9b18dbb926d33a6bb826b215c14de6b31207e497ca\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1202349806},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:364f5956de22b63db7dad4fcdd1f2740f71a482026c15aa3e2abebfbc5bf2fd7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:d3d262f90dd0f3c3f809b45f327ca086741a47f73e44560b04787609f0f99567\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1187310829},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:420326d8488ceff2cde22ad8b85d739b0c254d47e703f7ddb1f08f77a48816a6\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:54817da328fa589491a3acbe80acdd88c0830dcc63aaafc08c3539925a1a3b03\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1180692192},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\
\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: E0128 15:21:10.935481 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: E0128 15:21:10.935889 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: E0128 15:21:10.936161 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: E0128 15:21:10.936469 4959 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:10 crc kubenswrapper[4959]: E0128 15:21:10.936501 4959 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.744394 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.744891 4959 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc" exitCode=1 Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.744938 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc"} Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.746076 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.746337 4959 scope.go:117] "RemoveContainer" containerID="f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc" Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.746384 4959 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.746573 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.746746 4959 status_manager.go:851] "Failed to get status for pod" 
podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-nk6xq\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:11 crc kubenswrapper[4959]: I0128 15:21:11.747044 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:12 crc kubenswrapper[4959]: I0128 15:21:12.755748 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 15:21:12 crc kubenswrapper[4959]: I0128 15:21:12.756520 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"999cff687b60b9f76572b44b95153625bc63a42bb7f802665d88aaf10060c426"} Jan 28 15:21:12 crc kubenswrapper[4959]: I0128 15:21:12.758006 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:12 crc kubenswrapper[4959]: I0128 15:21:12.758771 4959 status_manager.go:851] "Failed to get status for pod" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-nk6xq\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:12 crc kubenswrapper[4959]: I0128 15:21:12.759499 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:12 crc kubenswrapper[4959]: I0128 15:21:12.759922 4959 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:12 crc kubenswrapper[4959]: I0128 15:21:12.760426 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:13 crc kubenswrapper[4959]: E0128 15:21:13.238960 4959 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 
38.102.83.107:6443: connect: connection refused" interval="7s" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.587096 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.589553 4959 status_manager.go:851] "Failed to get status for pod" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-nk6xq\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.590127 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.590374 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.590588 4959 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.590782 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.610755 4959 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.610806 4959 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:14 crc kubenswrapper[4959]: E0128 15:21:14.611563 4959 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.612191 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:14 crc kubenswrapper[4959]: E0128 15:21:14.719875 4959 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.107:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188eee407853f67f openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 15:21:04.267269759 +0000 UTC m=+247.713176142,LastTimestamp:2026-01-28 15:21:04.267269759 +0000 UTC m=+247.713176142,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 28 15:21:14 crc kubenswrapper[4959]: I0128 15:21:14.768599 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4ac470eea611eb2c0adea62c8d54025c5ae820d231f0924fe3b284fca00f5d1d"} Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.777272 4959 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="dcb4950eeae51517e71fff667da709b4ecdeaac699b4d96f1334b4f09b90d474" exitCode=0 Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.777350 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"dcb4950eeae51517e71fff667da709b4ecdeaac699b4d96f1334b4f09b90d474"} Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.777621 4959 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.777700 4959 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:15 crc kubenswrapper[4959]: E0128 15:21:15.778082 4959 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.778150 4959 status_manager.go:851] "Failed to get status for pod" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.778617 4959 status_manager.go:851] "Failed to get status for pod" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" pod="openshift-marketplace/community-operators-z8lcg" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-z8lcg\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.778834 4959 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.778985 4959 status_manager.go:851] "Failed to get status for pod" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" pod="openshift-marketplace/redhat-marketplace-dkw87" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dkw87\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:15 crc kubenswrapper[4959]: I0128 15:21:15.779162 4959 status_manager.go:851] "Failed to get status for pod" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" pod="openshift-authentication/oauth-openshift-558db77b4-nk6xq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-nk6xq\": dial tcp 38.102.83.107:6443: connect: connection refused" Jan 28 15:21:16 crc kubenswrapper[4959]: I0128 15:21:16.795622 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"66fc3513c7143ae8bfee4a6025428f23b662356a10df842aac51a5019c1b8b61"} Jan 28 15:21:16 crc kubenswrapper[4959]: I0128 15:21:16.796022 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"95961241a293bdd9893741641b59c3c7e789f79c18a7e6924504f536bb451123"} Jan 28 15:21:16 crc kubenswrapper[4959]: I0128 15:21:16.796042 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ca5ffc0aa23232ea53d955a1ee99194aeade4fa87312d494e694d8a881f866a8"} Jan 28 15:21:16 crc kubenswrapper[4959]: I0128 15:21:16.796054 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3a67ffafb5b52648273603aa9fa565c56f7206d5629992be7fbf0b5065567cd1"} Jan 28 15:21:17 crc kubenswrapper[4959]: I0128 15:21:17.806765 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5d5a0a0e74b07abf3d3663c381fa682ec8559219b80e1fb4be1b18c6acc8623c"} Jan 28 15:21:17 crc kubenswrapper[4959]: I0128 15:21:17.806965 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:17 crc kubenswrapper[4959]: I0128 15:21:17.807102 4959 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:17 crc kubenswrapper[4959]: I0128 15:21:17.807157 4959 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:18 crc kubenswrapper[4959]: I0128 15:21:18.471350 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:21:18 crc kubenswrapper[4959]: I0128 15:21:18.471666 4959 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 28 15:21:18 crc kubenswrapper[4959]: I0128 15:21:18.471932 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 28 15:21:19 crc kubenswrapper[4959]: I0128 15:21:19.613010 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:19 crc kubenswrapper[4959]: I0128 15:21:19.613226 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:19 crc kubenswrapper[4959]: I0128 15:21:19.618870 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:20 crc kubenswrapper[4959]: I0128 15:21:20.722009 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 15:21:22 crc kubenswrapper[4959]: I0128 15:21:22.871132 4959 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:23 crc kubenswrapper[4959]: I0128 15:21:23.130040 4959 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="549cbcab-efe2-4f45-83d1-c388dcdcfffc" Jan 28 15:21:23 crc kubenswrapper[4959]: I0128 15:21:23.846305 4959 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:23 crc kubenswrapper[4959]: I0128 15:21:23.847050 4959 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:23 crc kubenswrapper[4959]: I0128 15:21:23.849155 4959 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="549cbcab-efe2-4f45-83d1-c388dcdcfffc" Jan 28 15:21:23 crc kubenswrapper[4959]: I0128 15:21:23.853100 4959 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://3a67ffafb5b52648273603aa9fa565c56f7206d5629992be7fbf0b5065567cd1" Jan 28 15:21:23 crc kubenswrapper[4959]: I0128 15:21:23.853144 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 15:21:24 crc kubenswrapper[4959]: I0128 15:21:24.856937 4959 kubelet.go:1909] "Trying to delete pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:24 crc kubenswrapper[4959]: I0128 15:21:24.856972 4959 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="db03bd23-3b09-4f78-a35a-d219c7a948e4" Jan 28 15:21:24 crc kubenswrapper[4959]: I0128 15:21:24.860772 4959 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="549cbcab-efe2-4f45-83d1-c388dcdcfffc" Jan 28 15:21:28 crc kubenswrapper[4959]: I0128 15:21:28.470387 4959 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 28 15:21:28 crc kubenswrapper[4959]: I0128 15:21:28.470756 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 28 15:21:33 crc kubenswrapper[4959]: I0128 15:21:33.010304 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 28 15:21:33 crc kubenswrapper[4959]: I0128 15:21:33.112615 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 28 15:21:33 crc kubenswrapper[4959]: I0128 15:21:33.542093 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 28 15:21:33 crc kubenswrapper[4959]: I0128 15:21:33.841576 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 28 15:21:33 crc kubenswrapper[4959]: I0128 15:21:33.911221 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 28 15:21:33 crc kubenswrapper[4959]: I0128 15:21:33.948021 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 28 15:21:34 crc kubenswrapper[4959]: I0128 15:21:34.042983 4959 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 28 15:21:34 crc kubenswrapper[4959]: I0128 15:21:34.186671 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 28 15:21:34 crc kubenswrapper[4959]: I0128 15:21:34.195004 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 28 15:21:34 crc kubenswrapper[4959]: I0128 15:21:34.383926 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 28 15:21:34 crc kubenswrapper[4959]: I0128 15:21:34.455255 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 28 15:21:34 crc kubenswrapper[4959]: I0128 15:21:34.459500 4959 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 28 15:21:34 crc kubenswrapper[4959]: I0128 15:21:34.913078 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 28 15:21:34 crc kubenswrapper[4959]: I0128 15:21:34.950404 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 15:21:35 crc kubenswrapper[4959]: I0128 15:21:35.034918 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 28 15:21:35 crc kubenswrapper[4959]: I0128 15:21:35.628267 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 28 15:21:35 crc kubenswrapper[4959]: I0128 15:21:35.733222 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 28 15:21:35 crc kubenswrapper[4959]: I0128 15:21:35.751589 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 28 15:21:35 crc kubenswrapper[4959]: I0128 15:21:35.788087 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 28 15:21:35 crc kubenswrapper[4959]: I0128 15:21:35.797803 4959 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 28 15:21:35 crc kubenswrapper[4959]: I0128 15:21:35.804313 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.103661 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.300510 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.396139 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.508196 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.552937 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.643761 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.671412 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.762714 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.809679 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 
Jan 28 15:21:36 crc kubenswrapper[4959]: I0128 15:21:36.952482 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.010222 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.027056 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.038555 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.079256 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.111867 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.129431 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.206558 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.222600 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.229482 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.460966 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.483055 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.640729 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.684843 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.690632 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.716473 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.815360 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.858903 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.865662 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.919254 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Jan 28 15:21:37 crc kubenswrapper[4959]: I0128 15:21:37.967262 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.101990 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.109445 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.157897 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.418657 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.439365 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.471359 4959 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.471455 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.471538 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.472399 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"999cff687b60b9f76572b44b95153625bc63a42bb7f802665d88aaf10060c426"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.472536 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://999cff687b60b9f76572b44b95153625bc63a42bb7f802665d88aaf10060c426" gracePeriod=30
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.523368 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.542508 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
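The 15:21:38 block above is the end of the startup-probe cycle: one more failure, then kuberuntime_manager.go records "Container kube-controller-manager failed startup probe, will be restarted" and kuberuntime_container.go kills the container with gracePeriod=30 so it can come back under the pod's restart policy. Reduced to its core, the decision is a threshold check; a minimal sketch, assuming failureThreshold=3 (the actual threshold is not visible in this log):

    package main

    import "fmt"

    // Illustrative reduction of the restart decision logged above;
    // not kubelet source.
    func shouldRestart(consecutiveFailures, failureThreshold int) bool {
        return consecutiveFailures >= failureThreshold
    }

    func main() {
        // With the failures logged at 15:21:18, 15:21:28 and 15:21:38:
        fmt.Println(shouldRestart(3, 3)) // true -> kill with gracePeriod=30, then restart
    }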
object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.623745 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.667182 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.667186 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 28 15:21:38 crc kubenswrapper[4959]: I0128 15:21:38.701396 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.245002 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.368459 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.416211 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.510402 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.556712 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.575528 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.596703 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.683609 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.698879 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.781137 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.817213 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.898475 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.922153 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 15:21:39 crc kubenswrapper[4959]: I0128 15:21:39.991753 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.061430 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.117135 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.195622 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.273283 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.320474 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.322660 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.337016 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.361050 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.397210 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.428090 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.439875 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.526609 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.553194 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.593357 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.629940 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.631002 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.694453 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.732781 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.765187 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.779934 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.805566 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.884515 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.892841 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Jan 28 15:21:40 crc kubenswrapper[4959]: I0128 15:21:40.907567 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.086660 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.141732 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.229479 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.258171 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.374759 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.506854 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.518995 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.541518 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.612466 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.612710 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.728216 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.750679 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.762493 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
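The long run of "Caches populated" entries records the kubelet's per-object Secret and ConfigMap watches (plus the shared informers logged from k8s.io/client-go/informers/factory.go:160, such as *v1.RuntimeClass, *v1.Node and *v1.Pod) completing their initial list-and-watch after the API server came back. A minimal client-go sketch of the same machinery; the kubeconfig path is a placeholder assumption:

    package main

    import (
        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Build a client; the kubeconfig path is illustrative only.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        // Registering an informer creates a reflector; Start kicks off
        // the initial LIST+WATCH, and WaitForCacheSync returns once the
        // cache is populated -- the moment reflector.go logs
        // "Caches populated" for that type.
        factory := informers.NewSharedInformerFactory(client, 0)
        _ = factory.Core().V1().Nodes().Informer()
        stop := make(chan struct{})
        defer close(stop)
        factory.Start(stop)
        factory.WaitForCacheSync(stop)
    }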
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.765921 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.824922 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.842603 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.854019 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.861206 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.934595 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.953827 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.977749 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 28 15:21:41 crc kubenswrapper[4959]: I0128 15:21:41.981029 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.049615 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.055265 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.074987 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.183278 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.191777 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.237187 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.372602 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.413951 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.439008 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.561071 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.594464 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.652740 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.725149 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.812953 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.819274 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.938347 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.938726 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.958398 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 28 15:21:42 crc kubenswrapper[4959]: I0128 15:21:42.982460 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.020531 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.022395 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.078303 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.116574 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.475998 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.561312 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.620094 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.787414 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.795240 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.879993 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.910556 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.929126 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.931391 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 28 15:21:43 crc kubenswrapper[4959]: I0128 15:21:43.965955 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.034402 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.054588 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.158749 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.196986 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.197154 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.311002 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.461267 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.494401 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.513499 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.549382 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.584007 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.679602 4959 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.685528 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-nk6xq"]
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.685603 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.689627 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.704830 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=22.704809204 podStartE2EDuration="22.704809204s" podCreationTimestamp="2026-01-28 15:21:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:21:44.701623162 +0000 UTC m=+288.147529565" watchObservedRunningTime="2026-01-28 15:21:44.704809204 +0000 UTC m=+288.150715587"
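The "Observed pod startup duration" entry above is internally consistent: watchObservedRunningTime (2026-01-28 15:21:44.704809204) minus podCreationTimestamp (15:21:22) is 44.704809204 - 22 = 22.704809204s, which is exactly the reported podStartSLOduration and podStartE2EDuration. firstStartedPulling and lastFinishedPulling are the zero time (0001-01-01) because no image pull was needed, so pull time subtracts nothing and the SLO and end-to-end figures coincide. The m=+288.15... suffixes are Go monotonic-clock readings, roughly 288 seconds since the kubelet process started.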
pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=22.704809204 podStartE2EDuration="22.704809204s" podCreationTimestamp="2026-01-28 15:21:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:21:44.701623162 +0000 UTC m=+288.147529565" watchObservedRunningTime="2026-01-28 15:21:44.704809204 +0000 UTC m=+288.150715587" Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.709969 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.758992 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.787687 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.794599 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 15:21:44 crc kubenswrapper[4959]: I0128 15:21:44.801271 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.382994 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.412822 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.553266 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.618795 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.737165 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.824242 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.906609 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.954258 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.963532 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 28 15:21:45 crc kubenswrapper[4959]: I0128 15:21:45.967377 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.006163 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.105660 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.244482 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.286814 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.326380 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.479064 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.595244 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" path="/var/lib/kubelet/pods/29b47bb2-f090-43a4-b2ea-7bb83b683efb/volumes"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.597914 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.740172 4959 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.813278 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.834324 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.959758 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 28 15:21:46 crc kubenswrapper[4959]: I0128 15:21:46.971944 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.003192 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.044208 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.138452 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.160325 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.165751 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.167938 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.231761 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.308636 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.334456 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.580566 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.916719 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 28 15:21:47 crc kubenswrapper[4959]: I0128 15:21:47.982409 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.086017 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.133806 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.197664 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.197733 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.203830 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.296553 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.504070 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.514760 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"] Jan 28 15:21:48 crc kubenswrapper[4959]: E0128 15:21:48.515162 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" containerName="installer" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.515187 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" containerName="installer" Jan 28 15:21:48 crc kubenswrapper[4959]: E0128 15:21:48.515210 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" containerName="oauth-openshift" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.515218 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" containerName="oauth-openshift" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.515351 4959 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="beb38771-a14f-473a-9e2b-c9585abb58dc" containerName="installer" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.515366 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="29b47bb2-f090-43a4-b2ea-7bb83b683efb" containerName="oauth-openshift" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.515918 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.518576 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.518878 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.519625 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.519636 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.519750 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.520038 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.521056 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.522562 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.523472 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.523597 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.525125 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.527881 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.532432 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.534522 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.534761 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.538315 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"] Jan 28 15:21:48 crc 
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.623791 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.623845 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.623875 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sddp\" (UniqueName: \"kubernetes.io/projected/fd7938e0-b088-4d18-8ea6-adeb468d5956-kube-api-access-2sddp\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.623897 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-router-certs\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.623946 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-cliconfig\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.623982 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-serving-cert\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.624012 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-login\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.624037 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.624064 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.624138 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fd7938e0-b088-4d18-8ea6-adeb468d5956-audit-dir\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.624177 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-session\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.624200 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-audit-policies\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.624225 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-service-ca\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.624259 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-error\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725198 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725411 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sddp\" (UniqueName: \"kubernetes.io/projected/fd7938e0-b088-4d18-8ea6-adeb468d5956-kube-api-access-2sddp\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725482 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-router-certs\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725539 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-cliconfig\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725590 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-serving-cert\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725649 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-login\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725695 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725736 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725795 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fd7938e0-b088-4d18-8ea6-adeb468d5956-audit-dir\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725841 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-session\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725878 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-audit-policies\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.725962 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-service-ca\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.726000 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-error\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.726149 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fd7938e0-b088-4d18-8ea6-adeb468d5956-audit-dir\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.726807 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-cliconfig\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.727520 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-audit-policies\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.727579 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-service-ca\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.727711 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.733437 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-login\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.733736 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-error\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.733773 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-router-certs\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.734010 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.734190 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-session\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.735157 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-serving-cert\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.735427 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" 
(UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.736793 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fd7938e0-b088-4d18-8ea6-adeb468d5956-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.756360 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sddp\" (UniqueName: \"kubernetes.io/projected/fd7938e0-b088-4d18-8ea6-adeb468d5956-kube-api-access-2sddp\") pod \"oauth-openshift-675f5cc7c5-s4nxb\" (UID: \"fd7938e0-b088-4d18-8ea6-adeb468d5956\") " pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.838371 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.855917 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 28 15:21:48 crc kubenswrapper[4959]: I0128 15:21:48.959499 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 28 15:21:49 crc kubenswrapper[4959]: I0128 15:21:49.293865 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"] Jan 28 15:21:50 crc kubenswrapper[4959]: I0128 15:21:50.016978 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" event={"ID":"fd7938e0-b088-4d18-8ea6-adeb468d5956","Type":"ContainerStarted","Data":"40818225517ea8b937256ef1f258088310bdf8ce4eec1d33b63772ee55778d90"} Jan 28 15:21:50 crc kubenswrapper[4959]: I0128 15:21:50.017059 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" event={"ID":"fd7938e0-b088-4d18-8ea6-adeb468d5956","Type":"ContainerStarted","Data":"ebc581d8fa9aaed7940cae78fff4866a89da9f5a285ed8bece27a00f17df9c69"} Jan 28 15:21:50 crc kubenswrapper[4959]: I0128 15:21:50.018288 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" Jan 28 15:21:50 crc kubenswrapper[4959]: I0128 15:21:50.036548 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" podStartSLOduration=70.036526096 podStartE2EDuration="1m10.036526096s" podCreationTimestamp="2026-01-28 15:20:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:21:50.035235912 +0000 UTC m=+293.481142315" watchObservedRunningTime="2026-01-28 15:21:50.036526096 +0000 UTC m=+293.482432479" Jan 28 15:21:50 crc kubenswrapper[4959]: I0128 15:21:50.369852 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb" 
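The entries above trace the kubelet's volume reconciler walking every volume in the oauth-openshift pod's spec through the same three phases: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded, before the sandbox is created, the containers start, and the readiness probe flips to "ready" (at which point pod_startup_latency_tracker records podStartSLOduration). Below is a minimal Go sketch of that desired-state/actual-state reconciliation pattern; the names (volume, reconcile, mounted) are illustrative stand-ins, not the kubelet's actual types or API.

    // Sketch only: mirrors the logging phases visible above, under the
    // assumption that reconciliation = "drive each desired volume to mounted".
    package main

    import (
        "fmt"
        "time"
    )

    type volume struct {
        uniqueName string // e.g. "kubernetes.io/secret/<pod-uid>-v4-0-config-system-session"
        mounted    bool   // actual state; desired state is "mounted"
    }

    // reconcile drives each volume the pod spec wants toward the mounted
    // state, emitting the same three phases the kubelet logs.
    func reconcile(pod string, desired []*volume) {
        for _, v := range desired {
            // Phase 1: confirm the attach controller already attached it.
            fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod=%q\n", v.uniqueName, pod)
        }
        for _, v := range desired {
            if v.mounted {
                continue // actual state already matches desired state
            }
            // Phase 2: begin the mount operation.
            fmt.Printf("MountVolume started for volume %q pod=%q\n", v.uniqueName, pod)
            v.mounted = true // stand-in for the real SetUp (secret/configmap/host-path)
            // Phase 3: report success.
            fmt.Printf("MountVolume.SetUp succeeded for volume %q pod=%q\n", v.uniqueName, pod)
        }
    }

    func main() {
        pod := "openshift-authentication/oauth-openshift-675f5cc7c5-s4nxb"
        vols := []*volume{
            {uniqueName: "kubernetes.io/host-path/<pod-uid>-audit-dir"},
            {uniqueName: "kubernetes.io/secret/<pod-uid>-v4-0-config-system-session"},
        }
        start := time.Now()
        reconcile(pod, vols)
        // Analogue of pod_startup_latency_tracker's observation above.
        fmt.Printf("Observed pod startup duration pod=%q podStartSLOduration=%s\n", pod, time.Since(start))
    }

Note how the log interleaves the two passes: all VerifyControllerAttachedVolume lines for the pod appear first (reconciler_common.go:245), then the MountVolume/SetUp pairs (reconciler_common.go:218, operation_generator.go:637), which is why the sketch uses two loops rather than one.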
Jan 28 15:21:55 crc kubenswrapper[4959]: I0128 15:21:55.684585 4959 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 15:21:55 crc kubenswrapper[4959]: I0128 15:21:55.685671 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622" gracePeriod=5 Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.147531 4959 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.481187 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.832539 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.833037 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930432 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930497 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930537 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930563 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930590 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930901 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930940 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930957 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.930974 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:22:00 crc kubenswrapper[4959]: I0128 15:22:00.939190 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.032791 4959 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.032917 4959 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.032930 4959 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.032938 4959 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.032946 4959 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.112598 4959 generic.go:334] "Generic (PLEG): container finished" podID="2d4611de-0934-450c-a51e-67298e455900" containerID="ae4343e6208f796196fabbce1d4dc7387a3ed9b393247c0c2d168565e75ec662" exitCode=0 Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.112694 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" 
event={"ID":"2d4611de-0934-450c-a51e-67298e455900","Type":"ContainerDied","Data":"ae4343e6208f796196fabbce1d4dc7387a3ed9b393247c0c2d168565e75ec662"} Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.113844 4959 scope.go:117] "RemoveContainer" containerID="ae4343e6208f796196fabbce1d4dc7387a3ed9b393247c0c2d168565e75ec662" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.114554 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.114625 4959 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622" exitCode=137 Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.114689 4959 scope.go:117] "RemoveContainer" containerID="835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.114697 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.146925 4959 scope.go:117] "RemoveContainer" containerID="835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622" Jan 28 15:22:01 crc kubenswrapper[4959]: E0128 15:22:01.147659 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622\": container with ID starting with 835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622 not found: ID does not exist" containerID="835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.147718 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622"} err="failed to get container status \"835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622\": rpc error: code = NotFound desc = could not find container \"835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622\": container with ID starting with 835533e7572b4d3232673869cae012e480cbb780580bea68b4e5efcce0c29622 not found: ID does not exist" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.151508 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 28 15:22:01 crc kubenswrapper[4959]: I0128 15:22:01.968028 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 28 15:22:02 crc kubenswrapper[4959]: I0128 15:22:02.122745 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" event={"ID":"2d4611de-0934-450c-a51e-67298e455900","Type":"ContainerStarted","Data":"37973bf303360e1029cfb16e4daed33e4d23a9a5eecf977a47dbccbd66f2f864"} Jan 28 15:22:02 crc kubenswrapper[4959]: I0128 15:22:02.123752 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:22:02 crc kubenswrapper[4959]: I0128 15:22:02.127944 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" 
Jan 28 15:22:02 crc kubenswrapper[4959]: I0128 15:22:02.555785 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 15:22:02 crc kubenswrapper[4959]: I0128 15:22:02.595237 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 28 15:22:04 crc kubenswrapper[4959]: I0128 15:22:04.923190 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"] Jan 28 15:22:04 crc kubenswrapper[4959]: I0128 15:22:04.923990 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc" podUID="1d848d1b-446a-42bb-a789-5df54f8217aa" containerName="route-controller-manager" containerID="cri-o://3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5" gracePeriod=30 Jan 28 15:22:04 crc kubenswrapper[4959]: I0128 15:22:04.931480 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5dd877f547-2kjht"] Jan 28 15:22:04 crc kubenswrapper[4959]: I0128 15:22:04.931842 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht" podUID="1b3091fe-30cf-44cb-99b5-81f32e4f10d4" containerName="controller-manager" containerID="cri-o://91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6" gracePeriod=30 Jan 28 15:22:05 crc kubenswrapper[4959]: I0128 15:22:05.651942 4959 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.023467 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.060684 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms"] Jan 28 15:22:06 crc kubenswrapper[4959]: E0128 15:22:06.060934 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d848d1b-446a-42bb-a789-5df54f8217aa" containerName="route-controller-manager" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.060948 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d848d1b-446a-42bb-a789-5df54f8217aa" containerName="route-controller-manager" Jan 28 15:22:06 crc kubenswrapper[4959]: E0128 15:22:06.060968 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.060975 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.061075 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.061088 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d848d1b-446a-42bb-a789-5df54f8217aa" containerName="route-controller-manager" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.061527 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.078873 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms"] Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.091176 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.108098 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-client-ca\") pod \"1d848d1b-446a-42bb-a789-5df54f8217aa\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.108201 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-config\") pod \"1d848d1b-446a-42bb-a789-5df54f8217aa\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.108322 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mdwd\" (UniqueName: \"kubernetes.io/projected/1d848d1b-446a-42bb-a789-5df54f8217aa-kube-api-access-9mdwd\") pod \"1d848d1b-446a-42bb-a789-5df54f8217aa\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.108350 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d848d1b-446a-42bb-a789-5df54f8217aa-serving-cert\") pod \"1d848d1b-446a-42bb-a789-5df54f8217aa\" (UID: \"1d848d1b-446a-42bb-a789-5df54f8217aa\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.109099 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-client-ca" (OuterVolumeSpecName: "client-ca") pod "1d848d1b-446a-42bb-a789-5df54f8217aa" (UID: "1d848d1b-446a-42bb-a789-5df54f8217aa"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.109200 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-config" (OuterVolumeSpecName: "config") pod "1d848d1b-446a-42bb-a789-5df54f8217aa" (UID: "1d848d1b-446a-42bb-a789-5df54f8217aa"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.109590 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.109617 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1d848d1b-446a-42bb-a789-5df54f8217aa-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.117916 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d848d1b-446a-42bb-a789-5df54f8217aa-kube-api-access-9mdwd" (OuterVolumeSpecName: "kube-api-access-9mdwd") pod "1d848d1b-446a-42bb-a789-5df54f8217aa" (UID: "1d848d1b-446a-42bb-a789-5df54f8217aa"). InnerVolumeSpecName "kube-api-access-9mdwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.131324 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d848d1b-446a-42bb-a789-5df54f8217aa-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1d848d1b-446a-42bb-a789-5df54f8217aa" (UID: "1d848d1b-446a-42bb-a789-5df54f8217aa"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.150634 4959 generic.go:334] "Generic (PLEG): container finished" podID="1d848d1b-446a-42bb-a789-5df54f8217aa" containerID="3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5" exitCode=0 Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.150701 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc" event={"ID":"1d848d1b-446a-42bb-a789-5df54f8217aa","Type":"ContainerDied","Data":"3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5"} Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.150737 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc" event={"ID":"1d848d1b-446a-42bb-a789-5df54f8217aa","Type":"ContainerDied","Data":"b253251638ab156ace42b68741d30230565e958908e7e6c0f9bbd7ca6cc0b888"} Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.150757 4959 scope.go:117] "RemoveContainer" containerID="3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.150882 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.157544 4959 generic.go:334] "Generic (PLEG): container finished" podID="1b3091fe-30cf-44cb-99b5-81f32e4f10d4" containerID="91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6" exitCode=0 Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.157596 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht" event={"ID":"1b3091fe-30cf-44cb-99b5-81f32e4f10d4","Type":"ContainerDied","Data":"91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6"} Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.157637 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht" event={"ID":"1b3091fe-30cf-44cb-99b5-81f32e4f10d4","Type":"ContainerDied","Data":"b1a43916c0b64f2726b95bf57da5c2ec071054885d7efe9b0e6cea03a7ab0113"} Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.157712 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5dd877f547-2kjht" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.175704 4959 scope.go:117] "RemoveContainer" containerID="3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5" Jan 28 15:22:06 crc kubenswrapper[4959]: E0128 15:22:06.180043 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5\": container with ID starting with 3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5 not found: ID does not exist" containerID="3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.180096 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5"} err="failed to get container status \"3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5\": rpc error: code = NotFound desc = could not find container \"3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5\": container with ID starting with 3d8d4902a322248e20cb528d911d7df7ce0e9aa020bb8d208b93696e677befa5 not found: ID does not exist" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.180173 4959 scope.go:117] "RemoveContainer" containerID="91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.185199 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"] Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.191329 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7dd59f5db4-548wc"] Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.196980 4959 scope.go:117] "RemoveContainer" containerID="91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6" Jan 28 15:22:06 crc kubenswrapper[4959]: E0128 15:22:06.200295 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6\": container with ID starting with 
91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6 not found: ID does not exist" containerID="91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.200341 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6"} err="failed to get container status \"91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6\": rpc error: code = NotFound desc = could not find container \"91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6\": container with ID starting with 91af05690e0952cccbe99415ea63f18050d2c2015099acc7b2127bbb72aaccd6 not found: ID does not exist" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.210861 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-proxy-ca-bundles\") pod \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.210923 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-serving-cert\") pod \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.210952 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgtvq\" (UniqueName: \"kubernetes.io/projected/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-kube-api-access-mgtvq\") pod \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.210974 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-config\") pod \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.211045 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-client-ca\") pod \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\" (UID: \"1b3091fe-30cf-44cb-99b5-81f32e4f10d4\") " Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.211211 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-config\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.211251 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7e0f32-c679-4104-884b-63b8e4fcc35c-serving-cert\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.211276 4959 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-client-ca\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.211300 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-js7vw\" (UniqueName: \"kubernetes.io/projected/5f7e0f32-c679-4104-884b-63b8e4fcc35c-kube-api-access-js7vw\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.211376 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mdwd\" (UniqueName: \"kubernetes.io/projected/1d848d1b-446a-42bb-a789-5df54f8217aa-kube-api-access-9mdwd\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.211388 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d848d1b-446a-42bb-a789-5df54f8217aa-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.212097 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "1b3091fe-30cf-44cb-99b5-81f32e4f10d4" (UID: "1b3091fe-30cf-44cb-99b5-81f32e4f10d4"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.212639 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-client-ca" (OuterVolumeSpecName: "client-ca") pod "1b3091fe-30cf-44cb-99b5-81f32e4f10d4" (UID: "1b3091fe-30cf-44cb-99b5-81f32e4f10d4"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.212826 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-config" (OuterVolumeSpecName: "config") pod "1b3091fe-30cf-44cb-99b5-81f32e4f10d4" (UID: "1b3091fe-30cf-44cb-99b5-81f32e4f10d4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.217598 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1b3091fe-30cf-44cb-99b5-81f32e4f10d4" (UID: "1b3091fe-30cf-44cb-99b5-81f32e4f10d4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.218865 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-kube-api-access-mgtvq" (OuterVolumeSpecName: "kube-api-access-mgtvq") pod "1b3091fe-30cf-44cb-99b5-81f32e4f10d4" (UID: "1b3091fe-30cf-44cb-99b5-81f32e4f10d4"). InnerVolumeSpecName "kube-api-access-mgtvq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312776 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-config\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312841 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7e0f32-c679-4104-884b-63b8e4fcc35c-serving-cert\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312866 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-client-ca\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312886 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-js7vw\" (UniqueName: \"kubernetes.io/projected/5f7e0f32-c679-4104-884b-63b8e4fcc35c-kube-api-access-js7vw\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312942 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312953 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgtvq\" (UniqueName: \"kubernetes.io/projected/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-kube-api-access-mgtvq\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312963 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312973 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.312981 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1b3091fe-30cf-44cb-99b5-81f32e4f10d4-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.314817 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-config\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 
crc kubenswrapper[4959]: I0128 15:22:06.314878 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-client-ca\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.317966 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7e0f32-c679-4104-884b-63b8e4fcc35c-serving-cert\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.339757 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-js7vw\" (UniqueName: \"kubernetes.io/projected/5f7e0f32-c679-4104-884b-63b8e4fcc35c-kube-api-access-js7vw\") pod \"route-controller-manager-56d7b9dd7f-q2qms\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") " pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.401334 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.492916 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5dd877f547-2kjht"] Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.499153 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5dd877f547-2kjht"] Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.596218 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b3091fe-30cf-44cb-99b5-81f32e4f10d4" path="/var/lib/kubelet/pods/1b3091fe-30cf-44cb-99b5-81f32e4f10d4/volumes" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.596938 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d848d1b-446a-42bb-a789-5df54f8217aa" path="/var/lib/kubelet/pods/1d848d1b-446a-42bb-a789-5df54f8217aa/volumes" Jan 28 15:22:06 crc kubenswrapper[4959]: I0128 15:22:06.831923 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms"] Jan 28 15:22:07 crc kubenswrapper[4959]: I0128 15:22:07.186597 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" event={"ID":"5f7e0f32-c679-4104-884b-63b8e4fcc35c","Type":"ContainerStarted","Data":"4fc7cd3007bbc03db94882764562d54a8a2be612c5f20c1e1158b3a9ce3fa6c3"} Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.196364 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" event={"ID":"5f7e0f32-c679-4104-884b-63b8e4fcc35c","Type":"ContainerStarted","Data":"d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f"} Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.199223 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5"] Jan 28 15:22:08 crc kubenswrapper[4959]: E0128 15:22:08.199649 4959 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b3091fe-30cf-44cb-99b5-81f32e4f10d4" containerName="controller-manager" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.199804 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b3091fe-30cf-44cb-99b5-81f32e4f10d4" containerName="controller-manager" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.200091 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b3091fe-30cf-44cb-99b5-81f32e4f10d4" containerName="controller-manager" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.200808 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.200982 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.206325 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.206832 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.207058 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.208076 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.207171 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.207515 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.214214 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5"] Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.217085 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.218007 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.227333 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" podStartSLOduration=4.22730237 podStartE2EDuration="4.22730237s" podCreationTimestamp="2026-01-28 15:22:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:22:08.225541905 +0000 UTC m=+311.671448318" watchObservedRunningTime="2026-01-28 15:22:08.22730237 +0000 UTC m=+311.673208763" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.354447 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-client-ca\") pod 
\"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.354665 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-config\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.354745 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/272a933d-2451-4619-b6dd-53442641b5d8-serving-cert\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.354848 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-proxy-ca-bundles\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.354886 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mz65\" (UniqueName: \"kubernetes.io/projected/272a933d-2451-4619-b6dd-53442641b5d8-kube-api-access-8mz65\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.456504 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-client-ca\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.456645 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-config\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.456866 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/272a933d-2451-4619-b6dd-53442641b5d8-serving-cert\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.456903 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-proxy-ca-bundles\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " 
pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.456927 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mz65\" (UniqueName: \"kubernetes.io/projected/272a933d-2451-4619-b6dd-53442641b5d8-kube-api-access-8mz65\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.457960 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-client-ca\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.458589 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-proxy-ca-bundles\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.459167 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-config\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.466665 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/272a933d-2451-4619-b6dd-53442641b5d8-serving-cert\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.475246 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mz65\" (UniqueName: \"kubernetes.io/projected/272a933d-2451-4619-b6dd-53442641b5d8-kube-api-access-8mz65\") pod \"controller-manager-85b88dc7c7-w4vn5\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") " pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.525233 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:22:08 crc kubenswrapper[4959]: I0128 15:22:08.761419 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5"]
Jan 28 15:22:08 crc kubenswrapper[4959]: W0128 15:22:08.765380 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod272a933d_2451_4619_b6dd_53442641b5d8.slice/crio-fb6d6747f39bce019c347a4805e1ea0bc0bacab9d64b4b6c648fc0737d2b253a WatchSource:0}: Error finding container fb6d6747f39bce019c347a4805e1ea0bc0bacab9d64b4b6c648fc0737d2b253a: Status 404 returned error can't find the container with id fb6d6747f39bce019c347a4805e1ea0bc0bacab9d64b4b6c648fc0737d2b253a
Jan 28 15:22:09 crc kubenswrapper[4959]: I0128 15:22:09.208199 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log"
Jan 28 15:22:09 crc kubenswrapper[4959]: I0128 15:22:09.210687 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 28 15:22:09 crc kubenswrapper[4959]: I0128 15:22:09.210743 4959 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="999cff687b60b9f76572b44b95153625bc63a42bb7f802665d88aaf10060c426" exitCode=137
Jan 28 15:22:09 crc kubenswrapper[4959]: I0128 15:22:09.210819 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"999cff687b60b9f76572b44b95153625bc63a42bb7f802665d88aaf10060c426"}
Jan 28 15:22:09 crc kubenswrapper[4959]: I0128 15:22:09.210871 4959 scope.go:117] "RemoveContainer" containerID="f2e0c196ea0d5e1e420613d85a3492c97e20c48f8b0b07804ceadef69d94e2dc"
Jan 28 15:22:09 crc kubenswrapper[4959]: I0128 15:22:09.214656 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" event={"ID":"272a933d-2451-4619-b6dd-53442641b5d8","Type":"ContainerStarted","Data":"5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe"}
Jan 28 15:22:09 crc kubenswrapper[4959]: I0128 15:22:09.214720 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" event={"ID":"272a933d-2451-4619-b6dd-53442641b5d8","Type":"ContainerStarted","Data":"fb6d6747f39bce019c347a4805e1ea0bc0bacab9d64b4b6c648fc0737d2b253a"}
Jan 28 15:22:09 crc kubenswrapper[4959]: I0128 15:22:09.250388 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" podStartSLOduration=5.250364622 podStartE2EDuration="5.250364622s" podCreationTimestamp="2026-01-28 15:22:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:22:09.244561002 +0000 UTC m=+312.690467415" watchObservedRunningTime="2026-01-28 15:22:09.250364622 +0000 UTC m=+312.696271005"
Jan 28 15:22:10 crc kubenswrapper[4959]: I0128 15:22:10.225541 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log"
Jan 28 15:22:10 crc kubenswrapper[4959]: I0128 15:22:10.227797 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ad069388782371a6a7c1067474fed3d8e64587c3f7d0891a2e36a10041fbb603"}
Jan 28 15:22:10 crc kubenswrapper[4959]: I0128 15:22:10.228530 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5"
Jan 28 15:22:10 crc kubenswrapper[4959]: I0128 15:22:10.236011 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5"
Jan 28 15:22:10 crc kubenswrapper[4959]: I0128 15:22:10.722566 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 15:22:13 crc kubenswrapper[4959]: I0128 15:22:13.736266 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 28 15:22:14 crc kubenswrapper[4959]: I0128 15:22:14.630527 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Jan 28 15:22:14 crc kubenswrapper[4959]: I0128 15:22:14.811340 4959 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 28 15:22:15 crc kubenswrapper[4959]: I0128 15:22:15.888756 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 28 15:22:18 crc kubenswrapper[4959]: I0128 15:22:18.470313 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 15:22:18 crc kubenswrapper[4959]: I0128 15:22:18.475054 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 15:22:18 crc kubenswrapper[4959]: I0128 15:22:18.633129 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 28 15:22:18 crc kubenswrapper[4959]: I0128 15:22:18.817075 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.263365 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z8lcg"]
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.263730 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z8lcg" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerName="registry-server" containerID="cri-o://3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74" gracePeriod=2
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.270550 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.308289 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.743950 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.815409 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-catalog-content\") pod \"b181a50d-3075-479c-b460-bd2addc3e6b3\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") "
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.815525 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-utilities\") pod \"b181a50d-3075-479c-b460-bd2addc3e6b3\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") "
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.815685 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7s8j\" (UniqueName: \"kubernetes.io/projected/b181a50d-3075-479c-b460-bd2addc3e6b3-kube-api-access-t7s8j\") pod \"b181a50d-3075-479c-b460-bd2addc3e6b3\" (UID: \"b181a50d-3075-479c-b460-bd2addc3e6b3\") "
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.816728 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-utilities" (OuterVolumeSpecName: "utilities") pod "b181a50d-3075-479c-b460-bd2addc3e6b3" (UID: "b181a50d-3075-479c-b460-bd2addc3e6b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.817236 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.824618 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b181a50d-3075-479c-b460-bd2addc3e6b3-kube-api-access-t7s8j" (OuterVolumeSpecName: "kube-api-access-t7s8j") pod "b181a50d-3075-479c-b460-bd2addc3e6b3" (UID: "b181a50d-3075-479c-b460-bd2addc3e6b3"). InnerVolumeSpecName "kube-api-access-t7s8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.871495 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b181a50d-3075-479c-b460-bd2addc3e6b3" (UID: "b181a50d-3075-479c-b460-bd2addc3e6b3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.918992 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7s8j\" (UniqueName: \"kubernetes.io/projected/b181a50d-3075-479c-b460-bd2addc3e6b3-kube-api-access-t7s8j\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:19 crc kubenswrapper[4959]: I0128 15:22:19.919057 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b181a50d-3075-479c-b460-bd2addc3e6b3-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.307786 4959 generic.go:334] "Generic (PLEG): container finished" podID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerID="3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74" exitCode=0
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.307863 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8lcg" event={"ID":"b181a50d-3075-479c-b460-bd2addc3e6b3","Type":"ContainerDied","Data":"3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74"}
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.307903 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z8lcg"
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.307979 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z8lcg" event={"ID":"b181a50d-3075-479c-b460-bd2addc3e6b3","Type":"ContainerDied","Data":"78ea1d727582bde5d400a23d7e9c57cc7ee35bace2db576d6237e7867d574e36"}
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.307998 4959 scope.go:117] "RemoveContainer" containerID="3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74"
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.325889 4959 scope.go:117] "RemoveContainer" containerID="23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4"
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.343025 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z8lcg"]
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.343411 4959 scope.go:117] "RemoveContainer" containerID="039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806"
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.353645 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z8lcg"]
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.372317 4959 scope.go:117] "RemoveContainer" containerID="3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74"
Jan 28 15:22:20 crc kubenswrapper[4959]: E0128 15:22:20.372785 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74\": container with ID starting with 3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74 not found: ID does not exist" containerID="3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74"
Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.372822 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74"} err="failed to get container status \"3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74\": rpc error: code = NotFound desc = could not find container \"3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74\": container with ID starting with 3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74 not found: ID does not exist"
\"3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74\": rpc error: code = NotFound desc = could not find container \"3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74\": container with ID starting with 3724fc3fd3469c52a2cd02735061e2ef492a47fe642df0b2d761210964707a74 not found: ID does not exist" Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.372847 4959 scope.go:117] "RemoveContainer" containerID="23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4" Jan 28 15:22:20 crc kubenswrapper[4959]: E0128 15:22:20.373133 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4\": container with ID starting with 23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4 not found: ID does not exist" containerID="23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4" Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.373155 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4"} err="failed to get container status \"23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4\": rpc error: code = NotFound desc = could not find container \"23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4\": container with ID starting with 23c43aa7c2faa86c27ea5ffda125638982614c02c4d284dc2527fafec4da7fa4 not found: ID does not exist" Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.373169 4959 scope.go:117] "RemoveContainer" containerID="039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806" Jan 28 15:22:20 crc kubenswrapper[4959]: E0128 15:22:20.373552 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806\": container with ID starting with 039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806 not found: ID does not exist" containerID="039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806" Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.373588 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806"} err="failed to get container status \"039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806\": rpc error: code = NotFound desc = could not find container \"039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806\": container with ID starting with 039e4083d3a0deeec28fc7739a73d1119747f6f258beefa26a281a826eb6e806 not found: ID does not exist" Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.416879 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 28 15:22:20 crc kubenswrapper[4959]: I0128 15:22:20.595530 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" path="/var/lib/kubelet/pods/b181a50d-3075-479c-b460-bd2addc3e6b3/volumes" Jan 28 15:22:21 crc kubenswrapper[4959]: I0128 15:22:21.069885 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 15:22:24 crc kubenswrapper[4959]: I0128 15:22:24.788387 4959 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 28 15:22:26 crc kubenswrapper[4959]: I0128 15:22:26.009345 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 28 15:22:26 crc kubenswrapper[4959]: I0128 15:22:26.543380 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms"] Jan 28 15:22:26 crc kubenswrapper[4959]: I0128 15:22:26.543703 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" podUID="5f7e0f32-c679-4104-884b-63b8e4fcc35c" containerName="route-controller-manager" containerID="cri-o://d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f" gracePeriod=30 Jan 28 15:22:26 crc kubenswrapper[4959]: I0128 15:22:26.574904 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5"] Jan 28 15:22:26 crc kubenswrapper[4959]: I0128 15:22:26.575216 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" podUID="272a933d-2451-4619-b6dd-53442641b5d8" containerName="controller-manager" containerID="cri-o://5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe" gracePeriod=30 Jan 28 15:22:26 crc kubenswrapper[4959]: I0128 15:22:26.793341 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.209233 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.302826 4959 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326169 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-client-ca\") pod \"272a933d-2451-4619-b6dd-53442641b5d8\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326270 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mz65\" (UniqueName: \"kubernetes.io/projected/272a933d-2451-4619-b6dd-53442641b5d8-kube-api-access-8mz65\") pod \"272a933d-2451-4619-b6dd-53442641b5d8\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326342 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-config\") pod \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326406 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-client-ca\") pod \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326433 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7e0f32-c679-4104-884b-63b8e4fcc35c-serving-cert\") pod \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326471 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-config\") pod \"272a933d-2451-4619-b6dd-53442641b5d8\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326516 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/272a933d-2451-4619-b6dd-53442641b5d8-serving-cert\") pod \"272a933d-2451-4619-b6dd-53442641b5d8\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326546 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-proxy-ca-bundles\") pod \"272a933d-2451-4619-b6dd-53442641b5d8\" (UID: \"272a933d-2451-4619-b6dd-53442641b5d8\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.326582 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-js7vw\" (UniqueName: \"kubernetes.io/projected/5f7e0f32-c679-4104-884b-63b8e4fcc35c-kube-api-access-js7vw\") pod \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\" (UID: \"5f7e0f32-c679-4104-884b-63b8e4fcc35c\") "
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.337311 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-client-ca" (OuterVolumeSpecName: "client-ca") pod "5f7e0f32-c679-4104-884b-63b8e4fcc35c" (UID: "5f7e0f32-c679-4104-884b-63b8e4fcc35c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.337949 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-config" (OuterVolumeSpecName: "config") pod "5f7e0f32-c679-4104-884b-63b8e4fcc35c" (UID: "5f7e0f32-c679-4104-884b-63b8e4fcc35c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.338566 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-client-ca" (OuterVolumeSpecName: "client-ca") pod "272a933d-2451-4619-b6dd-53442641b5d8" (UID: "272a933d-2451-4619-b6dd-53442641b5d8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.338778 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "272a933d-2451-4619-b6dd-53442641b5d8" (UID: "272a933d-2451-4619-b6dd-53442641b5d8"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.338758 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-config" (OuterVolumeSpecName: "config") pod "272a933d-2451-4619-b6dd-53442641b5d8" (UID: "272a933d-2451-4619-b6dd-53442641b5d8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.345963 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/272a933d-2451-4619-b6dd-53442641b5d8-kube-api-access-8mz65" (OuterVolumeSpecName: "kube-api-access-8mz65") pod "272a933d-2451-4619-b6dd-53442641b5d8" (UID: "272a933d-2451-4619-b6dd-53442641b5d8"). InnerVolumeSpecName "kube-api-access-8mz65". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.346421 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/272a933d-2451-4619-b6dd-53442641b5d8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "272a933d-2451-4619-b6dd-53442641b5d8" (UID: "272a933d-2451-4619-b6dd-53442641b5d8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.348030 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f7e0f32-c679-4104-884b-63b8e4fcc35c-kube-api-access-js7vw" (OuterVolumeSpecName: "kube-api-access-js7vw") pod "5f7e0f32-c679-4104-884b-63b8e4fcc35c" (UID: "5f7e0f32-c679-4104-884b-63b8e4fcc35c"). InnerVolumeSpecName "kube-api-access-js7vw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.352504 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f7e0f32-c679-4104-884b-63b8e4fcc35c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5f7e0f32-c679-4104-884b-63b8e4fcc35c" (UID: "5f7e0f32-c679-4104-884b-63b8e4fcc35c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.366811 4959 generic.go:334] "Generic (PLEG): container finished" podID="5f7e0f32-c679-4104-884b-63b8e4fcc35c" containerID="d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f" exitCode=0 Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.366955 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" event={"ID":"5f7e0f32-c679-4104-884b-63b8e4fcc35c","Type":"ContainerDied","Data":"d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f"} Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.367018 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" event={"ID":"5f7e0f32-c679-4104-884b-63b8e4fcc35c","Type":"ContainerDied","Data":"4fc7cd3007bbc03db94882764562d54a8a2be612c5f20c1e1158b3a9ce3fa6c3"} Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.367040 4959 scope.go:117] "RemoveContainer" containerID="d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.367040 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.369398 4959 generic.go:334] "Generic (PLEG): container finished" podID="272a933d-2451-4619-b6dd-53442641b5d8" containerID="5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe" exitCode=0 Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.369526 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" event={"ID":"272a933d-2451-4619-b6dd-53442641b5d8","Type":"ContainerDied","Data":"5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe"} Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.369548 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" event={"ID":"272a933d-2451-4619-b6dd-53442641b5d8","Type":"ContainerDied","Data":"fb6d6747f39bce019c347a4805e1ea0bc0bacab9d64b4b6c648fc0737d2b253a"} Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.369639 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.395584 4959 scope.go:117] "RemoveContainer" containerID="d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f" Jan 28 15:22:27 crc kubenswrapper[4959]: E0128 15:22:27.401762 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f\": container with ID starting with d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f not found: ID does not exist" containerID="d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.401845 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f"} err="failed to get container status \"d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f\": rpc error: code = NotFound desc = could not find container \"d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f\": container with ID starting with d932fe3aa7c01f4205db363b1471dca045796a00f35ea3dc28b71f3bfcbfd78f not found: ID does not exist" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.401894 4959 scope.go:117] "RemoveContainer" containerID="5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.409334 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms"] Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.419206 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56d7b9dd7f-q2qms"] Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.421622 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5"] Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.426345 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-85b88dc7c7-w4vn5"] Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.428523 4959 scope.go:117] "RemoveContainer" containerID="5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe" Jan 28 15:22:27 crc kubenswrapper[4959]: E0128 15:22:27.429488 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe\": container with ID starting with 5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe not found: ID does not exist" containerID="5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe" Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.429583 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe"} err="failed to get container status \"5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe\": rpc error: code = NotFound desc = could not find container \"5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe\": container with ID starting with 5a80de4c44908adc5d534131f15f6a8225bc12be4f00495b1ac49e54737e8bbe not found: ID does not exist" Jan 28 15:22:27 crc 
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.429832 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.429851 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7e0f32-c679-4104-884b-63b8e4fcc35c-client-ca\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.429934 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7e0f32-c679-4104-884b-63b8e4fcc35c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.429957 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.429998 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/272a933d-2451-4619-b6dd-53442641b5d8-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.430014 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.430027 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-js7vw\" (UniqueName: \"kubernetes.io/projected/5f7e0f32-c679-4104-884b-63b8e4fcc35c-kube-api-access-js7vw\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:27 crc kubenswrapper[4959]: I0128 15:22:27.430040 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/272a933d-2451-4619-b6dd-53442641b5d8-client-ca\") on node \"crc\" DevicePath \"\""
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.203675 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"]
Jan 28 15:22:28 crc kubenswrapper[4959]: E0128 15:22:28.204005 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerName="extract-content"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204024 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerName="extract-content"
Jan 28 15:22:28 crc kubenswrapper[4959]: E0128 15:22:28.204042 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerName="registry-server"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204051 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerName="registry-server"
Jan 28 15:22:28 crc kubenswrapper[4959]: E0128 15:22:28.204060 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f7e0f32-c679-4104-884b-63b8e4fcc35c" containerName="route-controller-manager"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204066 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f7e0f32-c679-4104-884b-63b8e4fcc35c" containerName="route-controller-manager"
Jan 28 15:22:28 crc kubenswrapper[4959]: E0128 15:22:28.204076 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerName="extract-utilities"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204083 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerName="extract-utilities"
Jan 28 15:22:28 crc kubenswrapper[4959]: E0128 15:22:28.204095 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="272a933d-2451-4619-b6dd-53442641b5d8" containerName="controller-manager"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204123 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="272a933d-2451-4619-b6dd-53442641b5d8" containerName="controller-manager"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204228 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="b181a50d-3075-479c-b460-bd2addc3e6b3" containerName="registry-server"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204241 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="272a933d-2451-4619-b6dd-53442641b5d8" containerName="controller-manager"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204252 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f7e0f32-c679-4104-884b-63b8e4fcc35c" containerName="route-controller-manager"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.204787 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.208889 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-84ff9bf658-x76wl"]
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.209034 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.210047 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.210161 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.210203 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.210403 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.210832 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.211847 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.219385 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-84ff9bf658-x76wl"]
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.220979 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.221246 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.221334 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.222330 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.222729 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.223292 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.225955 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"]
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.229509 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241030 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-proxy-ca-bundles\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241081 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1763f7c3-62fd-43d6-b021-1f225de16705-serving-cert\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241121 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6sbh\" (UniqueName: \"kubernetes.io/projected/330ac85e-b20b-405b-9d27-351f152a00a9-kube-api-access-n6sbh\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241377 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4r84\" (UniqueName: \"kubernetes.io/projected/1763f7c3-62fd-43d6-b021-1f225de16705-kube-api-access-l4r84\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241556 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/330ac85e-b20b-405b-9d27-351f152a00a9-serving-cert\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241592 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-config\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241616 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-config\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241735 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-client-ca\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.241793 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-client-ca\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl"
Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.343851 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-client-ca\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"
pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.344696 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-client-ca\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.344820 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-proxy-ca-bundles\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.344920 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1763f7c3-62fd-43d6-b021-1f225de16705-serving-cert\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.345003 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6sbh\" (UniqueName: \"kubernetes.io/projected/330ac85e-b20b-405b-9d27-351f152a00a9-kube-api-access-n6sbh\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.345090 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4r84\" (UniqueName: \"kubernetes.io/projected/1763f7c3-62fd-43d6-b021-1f225de16705-kube-api-access-l4r84\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.345221 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/330ac85e-b20b-405b-9d27-351f152a00a9-serving-cert\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.345297 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-client-ca\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.345321 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-config\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 
15:22:28.345409 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-config\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.345776 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-client-ca\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.345887 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-proxy-ca-bundles\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.346578 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-config\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.347193 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-config\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.352650 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1763f7c3-62fd-43d6-b021-1f225de16705-serving-cert\") pod \"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.358610 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/330ac85e-b20b-405b-9d27-351f152a00a9-serving-cert\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.363441 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6sbh\" (UniqueName: \"kubernetes.io/projected/330ac85e-b20b-405b-9d27-351f152a00a9-kube-api-access-n6sbh\") pod \"controller-manager-84ff9bf658-x76wl\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.367433 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4r84\" (UniqueName: \"kubernetes.io/projected/1763f7c3-62fd-43d6-b021-1f225de16705-kube-api-access-l4r84\") pod 
\"route-controller-manager-59fd5f9d46-6b5tv\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.575851 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.583228 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.594810 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="272a933d-2451-4619-b6dd-53442641b5d8" path="/var/lib/kubelet/pods/272a933d-2451-4619-b6dd-53442641b5d8/volumes" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.595892 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f7e0f32-c679-4104-884b-63b8e4fcc35c" path="/var/lib/kubelet/pods/5f7e0f32-c679-4104-884b-63b8e4fcc35c/volumes" Jan 28 15:22:28 crc kubenswrapper[4959]: I0128 15:22:28.904674 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"] Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.067050 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-84ff9bf658-x76wl"] Jan 28 15:22:29 crc kubenswrapper[4959]: W0128 15:22:29.075206 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod330ac85e_b20b_405b_9d27_351f152a00a9.slice/crio-a5794dc057d849a8eb8ccf5136c5486743215a73451494b13529ce0455f04361 WatchSource:0}: Error finding container a5794dc057d849a8eb8ccf5136c5486743215a73451494b13529ce0455f04361: Status 404 returned error can't find the container with id a5794dc057d849a8eb8ccf5136c5486743215a73451494b13529ce0455f04361 Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.386280 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" event={"ID":"330ac85e-b20b-405b-9d27-351f152a00a9","Type":"ContainerStarted","Data":"febbc5c8f239d20c63d2e13b143b99552fbfeb66de2a20b33c01e60b0627d27c"} Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.386804 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" event={"ID":"330ac85e-b20b-405b-9d27-351f152a00a9","Type":"ContainerStarted","Data":"a5794dc057d849a8eb8ccf5136c5486743215a73451494b13529ce0455f04361"} Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.386828 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.388262 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" event={"ID":"1763f7c3-62fd-43d6-b021-1f225de16705","Type":"ContainerStarted","Data":"b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa"} Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.388319 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" 
event={"ID":"1763f7c3-62fd-43d6-b021-1f225de16705","Type":"ContainerStarted","Data":"a79d6a3c3dcd5c63f2e4c5131415ddb460072e6cf1f86ce1d1d4e0b6554131d9"} Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.388446 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.392007 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.407142 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" podStartSLOduration=3.407085738 podStartE2EDuration="3.407085738s" podCreationTimestamp="2026-01-28 15:22:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:22:29.405092046 +0000 UTC m=+332.850998439" watchObservedRunningTime="2026-01-28 15:22:29.407085738 +0000 UTC m=+332.852992121" Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.417865 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:29 crc kubenswrapper[4959]: I0128 15:22:29.430225 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" podStartSLOduration=3.430195096 podStartE2EDuration="3.430195096s" podCreationTimestamp="2026-01-28 15:22:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:22:29.427868744 +0000 UTC m=+332.873775147" watchObservedRunningTime="2026-01-28 15:22:29.430195096 +0000 UTC m=+332.876101489" Jan 28 15:22:31 crc kubenswrapper[4959]: I0128 15:22:31.464838 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"] Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.405132 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" podUID="1763f7c3-62fd-43d6-b021-1f225de16705" containerName="route-controller-manager" containerID="cri-o://b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa" gracePeriod=30 Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.899991 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.914468 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-config\") pod \"1763f7c3-62fd-43d6-b021-1f225de16705\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.914534 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-client-ca\") pod \"1763f7c3-62fd-43d6-b021-1f225de16705\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.914585 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1763f7c3-62fd-43d6-b021-1f225de16705-serving-cert\") pod \"1763f7c3-62fd-43d6-b021-1f225de16705\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.914635 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4r84\" (UniqueName: \"kubernetes.io/projected/1763f7c3-62fd-43d6-b021-1f225de16705-kube-api-access-l4r84\") pod \"1763f7c3-62fd-43d6-b021-1f225de16705\" (UID: \"1763f7c3-62fd-43d6-b021-1f225de16705\") " Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.916376 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-config" (OuterVolumeSpecName: "config") pod "1763f7c3-62fd-43d6-b021-1f225de16705" (UID: "1763f7c3-62fd-43d6-b021-1f225de16705"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.916372 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-client-ca" (OuterVolumeSpecName: "client-ca") pod "1763f7c3-62fd-43d6-b021-1f225de16705" (UID: "1763f7c3-62fd-43d6-b021-1f225de16705"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.923504 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1763f7c3-62fd-43d6-b021-1f225de16705-kube-api-access-l4r84" (OuterVolumeSpecName: "kube-api-access-l4r84") pod "1763f7c3-62fd-43d6-b021-1f225de16705" (UID: "1763f7c3-62fd-43d6-b021-1f225de16705"). InnerVolumeSpecName "kube-api-access-l4r84". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.928082 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1763f7c3-62fd-43d6-b021-1f225de16705-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1763f7c3-62fd-43d6-b021-1f225de16705" (UID: "1763f7c3-62fd-43d6-b021-1f225de16705"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.938600 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn"] Jan 28 15:22:32 crc kubenswrapper[4959]: E0128 15:22:32.938880 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1763f7c3-62fd-43d6-b021-1f225de16705" containerName="route-controller-manager" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.938903 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1763f7c3-62fd-43d6-b021-1f225de16705" containerName="route-controller-manager" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.939054 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1763f7c3-62fd-43d6-b021-1f225de16705" containerName="route-controller-manager" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.939580 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:32 crc kubenswrapper[4959]: I0128 15:22:32.954012 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn"] Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.015634 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acf7bbbc-b206-46dd-981a-8ea5d92174a4-config\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.015686 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/acf7bbbc-b206-46dd-981a-8ea5d92174a4-client-ca\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.015711 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb7zh\" (UniqueName: \"kubernetes.io/projected/acf7bbbc-b206-46dd-981a-8ea5d92174a4-kube-api-access-cb7zh\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.015744 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acf7bbbc-b206-46dd-981a-8ea5d92174a4-serving-cert\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.015846 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4r84\" (UniqueName: \"kubernetes.io/projected/1763f7c3-62fd-43d6-b021-1f225de16705-kube-api-access-l4r84\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.016005 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.016060 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1763f7c3-62fd-43d6-b021-1f225de16705-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.016076 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1763f7c3-62fd-43d6-b021-1f225de16705-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.117127 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acf7bbbc-b206-46dd-981a-8ea5d92174a4-serving-cert\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.117231 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acf7bbbc-b206-46dd-981a-8ea5d92174a4-config\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.117264 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/acf7bbbc-b206-46dd-981a-8ea5d92174a4-client-ca\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.117297 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb7zh\" (UniqueName: \"kubernetes.io/projected/acf7bbbc-b206-46dd-981a-8ea5d92174a4-kube-api-access-cb7zh\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.118829 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/acf7bbbc-b206-46dd-981a-8ea5d92174a4-client-ca\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.118973 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acf7bbbc-b206-46dd-981a-8ea5d92174a4-config\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.124203 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acf7bbbc-b206-46dd-981a-8ea5d92174a4-serving-cert\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " 
pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.148954 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb7zh\" (UniqueName: \"kubernetes.io/projected/acf7bbbc-b206-46dd-981a-8ea5d92174a4-kube-api-access-cb7zh\") pod \"route-controller-manager-7ff4957db6-qnpqn\" (UID: \"acf7bbbc-b206-46dd-981a-8ea5d92174a4\") " pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.279869 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.417441 4959 generic.go:334] "Generic (PLEG): container finished" podID="1763f7c3-62fd-43d6-b021-1f225de16705" containerID="b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa" exitCode=0 Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.417929 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" event={"ID":"1763f7c3-62fd-43d6-b021-1f225de16705","Type":"ContainerDied","Data":"b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa"} Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.417977 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" event={"ID":"1763f7c3-62fd-43d6-b021-1f225de16705","Type":"ContainerDied","Data":"a79d6a3c3dcd5c63f2e4c5131415ddb460072e6cf1f86ce1d1d4e0b6554131d9"} Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.418011 4959 scope.go:117] "RemoveContainer" containerID="b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.418201 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.448980 4959 scope.go:117] "RemoveContainer" containerID="b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa" Jan 28 15:22:33 crc kubenswrapper[4959]: E0128 15:22:33.449503 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa\": container with ID starting with b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa not found: ID does not exist" containerID="b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.449564 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa"} err="failed to get container status \"b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa\": rpc error: code = NotFound desc = could not find container \"b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa\": container with ID starting with b2d59de9ebfc4c12dec6b74384f3de9e57f65d8766c4f6f317ae56b5ce0bdafa not found: ID does not exist" Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.501998 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"] Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.508079 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-59fd5f9d46-6b5tv"] Jan 28 15:22:33 crc kubenswrapper[4959]: I0128 15:22:33.776222 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn"] Jan 28 15:22:33 crc kubenswrapper[4959]: W0128 15:22:33.779370 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podacf7bbbc_b206_46dd_981a_8ea5d92174a4.slice/crio-e028f8be713906c6e881106160d78c360e8edb2df9802b6f116fe270b266e0c3 WatchSource:0}: Error finding container e028f8be713906c6e881106160d78c360e8edb2df9802b6f116fe270b266e0c3: Status 404 returned error can't find the container with id e028f8be713906c6e881106160d78c360e8edb2df9802b6f116fe270b266e0c3 Jan 28 15:22:34 crc kubenswrapper[4959]: I0128 15:22:34.430314 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" event={"ID":"acf7bbbc-b206-46dd-981a-8ea5d92174a4","Type":"ContainerStarted","Data":"0717d3b3e57dee31eb12620bb8f31a16e3d551cdb252d7e78c4975cb1a89a7a6"} Jan 28 15:22:34 crc kubenswrapper[4959]: I0128 15:22:34.431050 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:34 crc kubenswrapper[4959]: I0128 15:22:34.431079 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" event={"ID":"acf7bbbc-b206-46dd-981a-8ea5d92174a4","Type":"ContainerStarted","Data":"e028f8be713906c6e881106160d78c360e8edb2df9802b6f116fe270b266e0c3"} Jan 28 15:22:34 crc kubenswrapper[4959]: I0128 15:22:34.455834 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" podStartSLOduration=3.455795455 podStartE2EDuration="3.455795455s" podCreationTimestamp="2026-01-28 15:22:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:22:34.455305122 +0000 UTC m=+337.901211525" watchObservedRunningTime="2026-01-28 15:22:34.455795455 +0000 UTC m=+337.901701838" Jan 28 15:22:34 crc kubenswrapper[4959]: I0128 15:22:34.486904 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7ff4957db6-qnpqn" Jan 28 15:22:34 crc kubenswrapper[4959]: I0128 15:22:34.594615 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1763f7c3-62fd-43d6-b021-1f225de16705" path="/var/lib/kubelet/pods/1763f7c3-62fd-43d6-b021-1f225de16705/volumes" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.650628 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l4sqx"] Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.652421 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.676406 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l4sqx"] Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.694232 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0f471df8-e300-4d18-afaa-08109b882b73-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.694312 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0f471df8-e300-4d18-afaa-08109b882b73-registry-tls\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.694360 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2b55\" (UniqueName: \"kubernetes.io/projected/0f471df8-e300-4d18-afaa-08109b882b73-kube-api-access-j2b55\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.694392 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.694502 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0f471df8-e300-4d18-afaa-08109b882b73-installation-pull-secrets\") 
pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.694531 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0f471df8-e300-4d18-afaa-08109b882b73-bound-sa-token\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.694582 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0f471df8-e300-4d18-afaa-08109b882b73-registry-certificates\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.694611 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0f471df8-e300-4d18-afaa-08109b882b73-trusted-ca\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.738803 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.795595 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0f471df8-e300-4d18-afaa-08109b882b73-registry-certificates\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.795680 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0f471df8-e300-4d18-afaa-08109b882b73-trusted-ca\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.795813 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0f471df8-e300-4d18-afaa-08109b882b73-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.795849 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0f471df8-e300-4d18-afaa-08109b882b73-registry-tls\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.795924 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0f471df8-e300-4d18-afaa-08109b882b73-installation-pull-secrets\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.795960 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0f471df8-e300-4d18-afaa-08109b882b73-bound-sa-token\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.796626 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0f471df8-e300-4d18-afaa-08109b882b73-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.798421 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0f471df8-e300-4d18-afaa-08109b882b73-trusted-ca\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.802308 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0f471df8-e300-4d18-afaa-08109b882b73-registry-certificates\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.805982 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0f471df8-e300-4d18-afaa-08109b882b73-registry-tls\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.806831 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0f471df8-e300-4d18-afaa-08109b882b73-installation-pull-secrets\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.815014 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2b55\" (UniqueName: \"kubernetes.io/projected/0f471df8-e300-4d18-afaa-08109b882b73-kube-api-access-j2b55\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.820333 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0f471df8-e300-4d18-afaa-08109b882b73-bound-sa-token\") pod \"image-registry-66df7c8f76-l4sqx\" (UID: \"0f471df8-e300-4d18-afaa-08109b882b73\") " pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.937488 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zxfjz"]
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.937811 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zxfjz" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerName="registry-server" containerID="cri-o://0a650494cac29e0dec741b654bda907d9f86f226bba06113a3dcf9e1d1d04dab" gracePeriod=30
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.952054 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dz2cj"]
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.953589 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dz2cj" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="registry-server" containerID="cri-o://ab4ab4e6656581d7f764888cb2a806946cd194612e2d26761964a58f67dee94f" gracePeriod=30
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.961574 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plqj9"]
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.961853 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator" containerID="cri-o://37973bf303360e1029cfb16e4daed33e4d23a9a5eecf977a47dbccbd66f2f864" gracePeriod=30
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.981602 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dkw87"]
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.981977 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dkw87" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="registry-server" containerID="cri-o://23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e" gracePeriod=30
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.987950 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2tkvn"]
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.988335 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2tkvn" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="registry-server" containerID="cri-o://4485c1b986253cb9031ee7698b53bd6502153d0c492dc9861317371807ab950b" gracePeriod=30
Jan 28 15:22:50 crc kubenswrapper[4959]: I0128 15:22:50.996428 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx"
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.005794 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-484gz"]
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.006862 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-484gz"
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.021635 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-484gz"]
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.051751 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/community-operators-dz2cj" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="registry-server" probeResult="failure" output=""
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.056960 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/community-operators-dz2cj" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="registry-server" probeResult="failure" output=""
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.100178 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ccd0a857-5b20-4589-8d52-b7339fa7524f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz"
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.100721 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4ww9\" (UniqueName: \"kubernetes.io/projected/ccd0a857-5b20-4589-8d52-b7339fa7524f-kube-api-access-g4ww9\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz"
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.100766 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ccd0a857-5b20-4589-8d52-b7339fa7524f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz"
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.202752 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4ww9\" (UniqueName: \"kubernetes.io/projected/ccd0a857-5b20-4589-8d52-b7339fa7524f-kube-api-access-g4ww9\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz"
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.202831 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ccd0a857-5b20-4589-8d52-b7339fa7524f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz"
Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.202879 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ccd0a857-5b20-4589-8d52-b7339fa7524f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz"
"operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ccd0a857-5b20-4589-8d52-b7339fa7524f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz" Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.204702 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ccd0a857-5b20-4589-8d52-b7339fa7524f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz" Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.210917 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ccd0a857-5b20-4589-8d52-b7339fa7524f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz" Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.220862 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4ww9\" (UniqueName: \"kubernetes.io/projected/ccd0a857-5b20-4589-8d52-b7339fa7524f-kube-api-access-g4ww9\") pod \"marketplace-operator-79b997595-484gz\" (UID: \"ccd0a857-5b20-4589-8d52-b7339fa7524f\") " pod="openshift-marketplace/marketplace-operator-79b997595-484gz" Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.373724 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-484gz" Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.425824 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-84ff9bf658-x76wl"] Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.436398 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" podUID="330ac85e-b20b-405b-9d27-351f152a00a9" containerName="controller-manager" containerID="cri-o://febbc5c8f239d20c63d2e13b143b99552fbfeb66de2a20b33c01e60b0627d27c" gracePeriod=30 Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.452910 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l4sqx"] Jan 28 15:22:51 crc kubenswrapper[4959]: W0128 15:22:51.569208 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f471df8_e300_4d18_afaa_08109b882b73.slice/crio-1ac7a80999f7cb71904f5499a59b1610cbb1178f712244959fe4822bc6c66ff6 WatchSource:0}: Error finding container 1ac7a80999f7cb71904f5499a59b1610cbb1178f712244959fe4822bc6c66ff6: Status 404 returned error can't find the container with id 1ac7a80999f7cb71904f5499a59b1610cbb1178f712244959fe4822bc6c66ff6 Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.579362 4959 generic.go:334] "Generic (PLEG): container finished" podID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerID="0a650494cac29e0dec741b654bda907d9f86f226bba06113a3dcf9e1d1d04dab" exitCode=0 Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.579531 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-zxfjz" event={"ID":"5b46d2a5-2d15-4841-97a6-b3768e4df1d4","Type":"ContainerDied","Data":"0a650494cac29e0dec741b654bda907d9f86f226bba06113a3dcf9e1d1d04dab"} Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.628717 4959 generic.go:334] "Generic (PLEG): container finished" podID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerID="4485c1b986253cb9031ee7698b53bd6502153d0c492dc9861317371807ab950b" exitCode=0 Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.628806 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2tkvn" event={"ID":"575b26ae-87aa-469e-9bd9-1b4384d80093","Type":"ContainerDied","Data":"4485c1b986253cb9031ee7698b53bd6502153d0c492dc9861317371807ab950b"} Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.631664 4959 generic.go:334] "Generic (PLEG): container finished" podID="2d4611de-0934-450c-a51e-67298e455900" containerID="37973bf303360e1029cfb16e4daed33e4d23a9a5eecf977a47dbccbd66f2f864" exitCode=0 Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.631736 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" event={"ID":"2d4611de-0934-450c-a51e-67298e455900","Type":"ContainerDied","Data":"37973bf303360e1029cfb16e4daed33e4d23a9a5eecf977a47dbccbd66f2f864"} Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.631785 4959 scope.go:117] "RemoveContainer" containerID="ae4343e6208f796196fabbce1d4dc7387a3ed9b393247c0c2d168565e75ec662" Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.635033 4959 generic.go:334] "Generic (PLEG): container finished" podID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerID="ab4ab4e6656581d7f764888cb2a806946cd194612e2d26761964a58f67dee94f" exitCode=0 Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.635084 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dz2cj" event={"ID":"288654ae-ff9c-4ab8-999a-29ca0266da2a","Type":"ContainerDied","Data":"ab4ab4e6656581d7f764888cb2a806946cd194612e2d26761964a58f67dee94f"} Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.638897 4959 generic.go:334] "Generic (PLEG): container finished" podID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerID="23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e" exitCode=0 Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.638948 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dkw87" event={"ID":"9058c198-cfe2-496a-b045-d3650a0a36bf","Type":"ContainerDied","Data":"23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e"} Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.640781 4959 generic.go:334] "Generic (PLEG): container finished" podID="330ac85e-b20b-405b-9d27-351f152a00a9" containerID="febbc5c8f239d20c63d2e13b143b99552fbfeb66de2a20b33c01e60b0627d27c" exitCode=0 Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.640809 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" event={"ID":"330ac85e-b20b-405b-9d27-351f152a00a9","Type":"ContainerDied","Data":"febbc5c8f239d20c63d2e13b143b99552fbfeb66de2a20b33c01e60b0627d27c"} Jan 28 15:22:51 crc kubenswrapper[4959]: I0128 15:22:51.842841 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-484gz"] Jan 28 15:22:51 crc kubenswrapper[4959]: W0128 15:22:51.852361 4959 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podccd0a857_5b20_4589_8d52_b7339fa7524f.slice/crio-e9461e8b245885ad698e9759dab417b812765008c3066d9ee01c976653d3b355 WatchSource:0}: Error finding container e9461e8b245885ad698e9759dab417b812765008c3066d9ee01c976653d3b355: Status 404 returned error can't find the container with id e9461e8b245885ad698e9759dab417b812765008c3066d9ee01c976653d3b355 Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.278811 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:52 crc kubenswrapper[4959]: E0128 15:22:52.321372 4959 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e is running failed: container process not found" containerID="23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 15:22:52 crc kubenswrapper[4959]: E0128 15:22:52.322041 4959 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e is running failed: container process not found" containerID="23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 15:22:52 crc kubenswrapper[4959]: E0128 15:22:52.323355 4959 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e is running failed: container process not found" containerID="23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 15:22:52 crc kubenswrapper[4959]: E0128 15:22:52.323462 4959 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-dkw87" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="registry-server" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.323633 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6sbh\" (UniqueName: \"kubernetes.io/projected/330ac85e-b20b-405b-9d27-351f152a00a9-kube-api-access-n6sbh\") pod \"330ac85e-b20b-405b-9d27-351f152a00a9\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.323687 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/330ac85e-b20b-405b-9d27-351f152a00a9-serving-cert\") pod \"330ac85e-b20b-405b-9d27-351f152a00a9\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.323761 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-client-ca\") pod \"330ac85e-b20b-405b-9d27-351f152a00a9\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") 
" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.323802 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-proxy-ca-bundles\") pod \"330ac85e-b20b-405b-9d27-351f152a00a9\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.323895 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-config\") pod \"330ac85e-b20b-405b-9d27-351f152a00a9\" (UID: \"330ac85e-b20b-405b-9d27-351f152a00a9\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.325233 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-client-ca" (OuterVolumeSpecName: "client-ca") pod "330ac85e-b20b-405b-9d27-351f152a00a9" (UID: "330ac85e-b20b-405b-9d27-351f152a00a9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.325372 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-config" (OuterVolumeSpecName: "config") pod "330ac85e-b20b-405b-9d27-351f152a00a9" (UID: "330ac85e-b20b-405b-9d27-351f152a00a9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.325397 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "330ac85e-b20b-405b-9d27-351f152a00a9" (UID: "330ac85e-b20b-405b-9d27-351f152a00a9"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.333122 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/330ac85e-b20b-405b-9d27-351f152a00a9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "330ac85e-b20b-405b-9d27-351f152a00a9" (UID: "330ac85e-b20b-405b-9d27-351f152a00a9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.333144 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/330ac85e-b20b-405b-9d27-351f152a00a9-kube-api-access-n6sbh" (OuterVolumeSpecName: "kube-api-access-n6sbh") pod "330ac85e-b20b-405b-9d27-351f152a00a9" (UID: "330ac85e-b20b-405b-9d27-351f152a00a9"). InnerVolumeSpecName "kube-api-access-n6sbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.339487 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dkw87" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.340461 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.360642 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.391338 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2tkvn" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.404059 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dz2cj" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426619 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-utilities\") pod \"9058c198-cfe2-496a-b045-d3650a0a36bf\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426685 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-utilities\") pod \"575b26ae-87aa-469e-9bd9-1b4384d80093\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426724 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhlh2\" (UniqueName: \"kubernetes.io/projected/575b26ae-87aa-469e-9bd9-1b4384d80093-kube-api-access-jhlh2\") pod \"575b26ae-87aa-469e-9bd9-1b4384d80093\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426767 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpzst\" (UniqueName: \"kubernetes.io/projected/9058c198-cfe2-496a-b045-d3650a0a36bf-kube-api-access-bpzst\") pod \"9058c198-cfe2-496a-b045-d3650a0a36bf\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426787 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-catalog-content\") pod \"9058c198-cfe2-496a-b045-d3650a0a36bf\" (UID: \"9058c198-cfe2-496a-b045-d3650a0a36bf\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426831 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d4611de-0934-450c-a51e-67298e455900-marketplace-trusted-ca\") pod \"2d4611de-0934-450c-a51e-67298e455900\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426865 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-utilities\") pod \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426882 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksm4s\" (UniqueName: \"kubernetes.io/projected/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-kube-api-access-ksm4s\") pod \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426908 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znblt\" (UniqueName: 
\"kubernetes.io/projected/2d4611de-0934-450c-a51e-67298e455900-kube-api-access-znblt\") pod \"2d4611de-0934-450c-a51e-67298e455900\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426926 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-utilities\") pod \"288654ae-ff9c-4ab8-999a-29ca0266da2a\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426951 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-catalog-content\") pod \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\" (UID: \"5b46d2a5-2d15-4841-97a6-b3768e4df1d4\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.426974 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tr95z\" (UniqueName: \"kubernetes.io/projected/288654ae-ff9c-4ab8-999a-29ca0266da2a-kube-api-access-tr95z\") pod \"288654ae-ff9c-4ab8-999a-29ca0266da2a\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.427023 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2d4611de-0934-450c-a51e-67298e455900-marketplace-operator-metrics\") pod \"2d4611de-0934-450c-a51e-67298e455900\" (UID: \"2d4611de-0934-450c-a51e-67298e455900\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.427041 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-catalog-content\") pod \"575b26ae-87aa-469e-9bd9-1b4384d80093\" (UID: \"575b26ae-87aa-469e-9bd9-1b4384d80093\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.427068 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-catalog-content\") pod \"288654ae-ff9c-4ab8-999a-29ca0266da2a\" (UID: \"288654ae-ff9c-4ab8-999a-29ca0266da2a\") " Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.427383 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.427396 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6sbh\" (UniqueName: \"kubernetes.io/projected/330ac85e-b20b-405b-9d27-351f152a00a9-kube-api-access-n6sbh\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.427406 4959 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/330ac85e-b20b-405b-9d27-351f152a00a9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.427415 4959 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.427426 4959 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/330ac85e-b20b-405b-9d27-351f152a00a9-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.431356 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-utilities" (OuterVolumeSpecName: "utilities") pod "575b26ae-87aa-469e-9bd9-1b4384d80093" (UID: "575b26ae-87aa-469e-9bd9-1b4384d80093"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.431897 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-utilities" (OuterVolumeSpecName: "utilities") pod "9058c198-cfe2-496a-b045-d3650a0a36bf" (UID: "9058c198-cfe2-496a-b045-d3650a0a36bf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.429217 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-utilities" (OuterVolumeSpecName: "utilities") pod "5b46d2a5-2d15-4841-97a6-b3768e4df1d4" (UID: "5b46d2a5-2d15-4841-97a6-b3768e4df1d4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.435522 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d4611de-0934-450c-a51e-67298e455900-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "2d4611de-0934-450c-a51e-67298e455900" (UID: "2d4611de-0934-450c-a51e-67298e455900"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.436247 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-utilities" (OuterVolumeSpecName: "utilities") pod "288654ae-ff9c-4ab8-999a-29ca0266da2a" (UID: "288654ae-ff9c-4ab8-999a-29ca0266da2a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.436553 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d4611de-0934-450c-a51e-67298e455900-kube-api-access-znblt" (OuterVolumeSpecName: "kube-api-access-znblt") pod "2d4611de-0934-450c-a51e-67298e455900" (UID: "2d4611de-0934-450c-a51e-67298e455900"). InnerVolumeSpecName "kube-api-access-znblt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.438514 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/288654ae-ff9c-4ab8-999a-29ca0266da2a-kube-api-access-tr95z" (OuterVolumeSpecName: "kube-api-access-tr95z") pod "288654ae-ff9c-4ab8-999a-29ca0266da2a" (UID: "288654ae-ff9c-4ab8-999a-29ca0266da2a"). InnerVolumeSpecName "kube-api-access-tr95z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.440139 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d4611de-0934-450c-a51e-67298e455900-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "2d4611de-0934-450c-a51e-67298e455900" (UID: "2d4611de-0934-450c-a51e-67298e455900"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.455490 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-kube-api-access-ksm4s" (OuterVolumeSpecName: "kube-api-access-ksm4s") pod "5b46d2a5-2d15-4841-97a6-b3768e4df1d4" (UID: "5b46d2a5-2d15-4841-97a6-b3768e4df1d4"). InnerVolumeSpecName "kube-api-access-ksm4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.455575 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9058c198-cfe2-496a-b045-d3650a0a36bf-kube-api-access-bpzst" (OuterVolumeSpecName: "kube-api-access-bpzst") pod "9058c198-cfe2-496a-b045-d3650a0a36bf" (UID: "9058c198-cfe2-496a-b045-d3650a0a36bf"). InnerVolumeSpecName "kube-api-access-bpzst". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.467211 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/575b26ae-87aa-469e-9bd9-1b4384d80093-kube-api-access-jhlh2" (OuterVolumeSpecName: "kube-api-access-jhlh2") pod "575b26ae-87aa-469e-9bd9-1b4384d80093" (UID: "575b26ae-87aa-469e-9bd9-1b4384d80093"). InnerVolumeSpecName "kube-api-access-jhlh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.497080 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9058c198-cfe2-496a-b045-d3650a0a36bf" (UID: "9058c198-cfe2-496a-b045-d3650a0a36bf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.507446 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b46d2a5-2d15-4841-97a6-b3768e4df1d4" (UID: "5b46d2a5-2d15-4841-97a6-b3768e4df1d4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529065 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpzst\" (UniqueName: \"kubernetes.io/projected/9058c198-cfe2-496a-b045-d3650a0a36bf-kube-api-access-bpzst\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529126 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529139 4959 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d4611de-0934-450c-a51e-67298e455900-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529150 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529163 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksm4s\" (UniqueName: \"kubernetes.io/projected/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-kube-api-access-ksm4s\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529173 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znblt\" (UniqueName: \"kubernetes.io/projected/2d4611de-0934-450c-a51e-67298e455900-kube-api-access-znblt\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529183 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529192 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b46d2a5-2d15-4841-97a6-b3768e4df1d4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529203 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tr95z\" (UniqueName: \"kubernetes.io/projected/288654ae-ff9c-4ab8-999a-29ca0266da2a-kube-api-access-tr95z\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529212 4959 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2d4611de-0934-450c-a51e-67298e455900-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529224 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9058c198-cfe2-496a-b045-d3650a0a36bf-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529234 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.529243 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhlh2\" (UniqueName: 
\"kubernetes.io/projected/575b26ae-87aa-469e-9bd9-1b4384d80093-kube-api-access-jhlh2\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.534791 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "288654ae-ff9c-4ab8-999a-29ca0266da2a" (UID: "288654ae-ff9c-4ab8-999a-29ca0266da2a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.577259 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "575b26ae-87aa-469e-9bd9-1b4384d80093" (UID: "575b26ae-87aa-469e-9bd9-1b4384d80093"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.630660 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/575b26ae-87aa-469e-9bd9-1b4384d80093-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.630698 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/288654ae-ff9c-4ab8-999a-29ca0266da2a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.649278 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2tkvn" event={"ID":"575b26ae-87aa-469e-9bd9-1b4384d80093","Type":"ContainerDied","Data":"c02fd0ba9c3bcfcab6a875bfd7d343855fbcba1583b1be7e974841baae64479a"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.649345 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2tkvn" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.649385 4959 scope.go:117] "RemoveContainer" containerID="4485c1b986253cb9031ee7698b53bd6502153d0c492dc9861317371807ab950b" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.651476 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" event={"ID":"0f471df8-e300-4d18-afaa-08109b882b73","Type":"ContainerStarted","Data":"f2ac033fcc630b126a8f963e6c977aed298bfdaf6004cdbce38b901636fea1d7"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.652031 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.652065 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" event={"ID":"0f471df8-e300-4d18-afaa-08109b882b73","Type":"ContainerStarted","Data":"1ac7a80999f7cb71904f5499a59b1610cbb1178f712244959fe4822bc6c66ff6"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.657416 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" event={"ID":"2d4611de-0934-450c-a51e-67298e455900","Type":"ContainerDied","Data":"df1f96ba7469dce2921fd54cb92f262c9043c1c49b4bac4fd412727232b17138"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.657525 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.662362 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dz2cj" event={"ID":"288654ae-ff9c-4ab8-999a-29ca0266da2a","Type":"ContainerDied","Data":"1c90a8fb708d2fdc7dd9b1b949e059f55483775fcd620aacc06e4a5709829697"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.662400 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dz2cj" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.668704 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dkw87" event={"ID":"9058c198-cfe2-496a-b045-d3650a0a36bf","Type":"ContainerDied","Data":"b8ac479db03476586c8b1bcbb0f5c09c7fc430256e0f6feed4e5a52f77cd333c"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.668847 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dkw87" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.672377 4959 scope.go:117] "RemoveContainer" containerID="7f9c2bc05a6e5188c83d8856cfc405195ef7200e9757929685953c0ca21cd415" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.675857 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" podStartSLOduration=2.6758364390000002 podStartE2EDuration="2.675836439s" podCreationTimestamp="2026-01-28 15:22:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:22:52.675739986 +0000 UTC m=+356.121646379" watchObservedRunningTime="2026-01-28 15:22:52.675836439 +0000 UTC m=+356.121742822" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.677509 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.677517 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-84ff9bf658-x76wl" event={"ID":"330ac85e-b20b-405b-9d27-351f152a00a9","Type":"ContainerDied","Data":"a5794dc057d849a8eb8ccf5136c5486743215a73451494b13529ce0455f04361"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.683190 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-484gz" event={"ID":"ccd0a857-5b20-4589-8d52-b7339fa7524f","Type":"ContainerStarted","Data":"af491796cd93f06dd565ba4bc8b7c5809ffa35cf29f9a7791254daac90209a0a"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.683276 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-484gz" event={"ID":"ccd0a857-5b20-4589-8d52-b7339fa7524f","Type":"ContainerStarted","Data":"e9461e8b245885ad698e9759dab417b812765008c3066d9ee01c976653d3b355"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.683444 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-484gz" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.684905 4959 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-484gz container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.70:8080/healthz\": dial tcp 10.217.0.70:8080: connect: connection refused" start-of-body= Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.684985 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-484gz" podUID="ccd0a857-5b20-4589-8d52-b7339fa7524f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.70:8080/healthz\": dial tcp 10.217.0.70:8080: connect: connection refused" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.690462 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxfjz" event={"ID":"5b46d2a5-2d15-4841-97a6-b3768e4df1d4","Type":"ContainerDied","Data":"e0e9f087269f8d57fae3d4f1eac95aa9a307ebc0ce0bbd41633c240965c9b215"} Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.690625 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zxfjz" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.696955 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dz2cj"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.700774 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dz2cj"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.704205 4959 scope.go:117] "RemoveContainer" containerID="79aef054e2ac683eec971f74c06a371db5e7237fdac49fc2a8df0d2f5ebdd358" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.713527 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2tkvn"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.747217 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2tkvn"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.751204 4959 scope.go:117] "RemoveContainer" containerID="37973bf303360e1029cfb16e4daed33e4d23a9a5eecf977a47dbccbd66f2f864" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.763263 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plqj9"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.780005 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plqj9"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.784241 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-84ff9bf658-x76wl"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.789283 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-84ff9bf658-x76wl"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.795717 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zxfjz"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.804005 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zxfjz"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.807954 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dkw87"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.809076 4959 scope.go:117] "RemoveContainer" containerID="ab4ab4e6656581d7f764888cb2a806946cd194612e2d26761964a58f67dee94f" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.818848 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dkw87"] Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.822479 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-484gz" podStartSLOduration=2.822456869 podStartE2EDuration="2.822456869s" podCreationTimestamp="2026-01-28 15:22:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:22:52.820813767 +0000 UTC m=+356.266720190" watchObservedRunningTime="2026-01-28 15:22:52.822456869 +0000 UTC m=+356.268363252" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.837912 4959 scope.go:117] "RemoveContainer" containerID="71c9c4e11553c331ee1292c6085d1c1b4dd933b5eb74679014a6a1966417bcf8" Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 
Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.878081 4959 scope.go:117] "RemoveContainer" containerID="23b1ddd46e06f7ef4d548fd93feb7402c46aa18480d53a77b7d9b2f2a244ed5e"
Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.901067 4959 scope.go:117] "RemoveContainer" containerID="d4e941319bb58e5ecd05b03273dae04808307e37ac6a4bb3860643a2adc18d77"
Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.925954 4959 scope.go:117] "RemoveContainer" containerID="e37fc01c4af49c00ea64ad6159d0847776cc09f67cde277c376238c2f561087b"
Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.939730 4959 scope.go:117] "RemoveContainer" containerID="febbc5c8f239d20c63d2e13b143b99552fbfeb66de2a20b33c01e60b0627d27c"
Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.958993 4959 scope.go:117] "RemoveContainer" containerID="0a650494cac29e0dec741b654bda907d9f86f226bba06113a3dcf9e1d1d04dab"
Jan 28 15:22:52 crc kubenswrapper[4959]: I0128 15:22:52.981301 4959 scope.go:117] "RemoveContainer" containerID="f83f326de2c0e36942e6a6c4700ac50a0b6bb5f1b1956cf27a0f0448d9d861d6"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.004609 4959 scope.go:117] "RemoveContainer" containerID="c468a9e1175f73700c086b5bba5b7203182f881ec67e93e82628bb848b6f804f"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.221827 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq"]
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222069 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222083 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222119 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222129 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222139 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="extract-utilities"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222147 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="extract-utilities"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222156 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerName="extract-utilities"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222163 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerName="extract-utilities"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222174 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222179 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222186 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="extract-utilities"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222192 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="extract-utilities"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222199 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="extract-content"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222204 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="extract-content"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222211 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerName="extract-content"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222216 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerName="extract-content"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222227 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="330ac85e-b20b-405b-9d27-351f152a00a9" containerName="controller-manager"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222234 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="330ac85e-b20b-405b-9d27-351f152a00a9" containerName="controller-manager"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222242 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222250 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222260 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222266 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222273 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="extract-utilities"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222280 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="extract-utilities"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222286 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222292 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222302 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="extract-content"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222308 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="extract-content"
Jan 28 15:22:53 crc kubenswrapper[4959]: E0128 15:22:53.222318 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="extract-content"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222323 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="extract-content"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222422 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222434 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222442 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="330ac85e-b20b-405b-9d27-351f152a00a9" containerName="controller-manager"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222455 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222463 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222471 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222480 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" containerName="registry-server"
Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.222910 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq"
Need to start a new one" pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.225498 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.225927 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.226215 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.226612 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.226920 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.227722 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.229463 4959 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plqj9 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.229532 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plqj9" podUID="2d4611de-0934-450c-a51e-67298e455900" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.29:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.233940 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.247392 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq"] Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.359122 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-client-ca\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.359213 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84f22c80-20e5-4729-bb3b-564ad8712a74-serving-cert\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.359284 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-config\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: 
\"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.359315 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v994q\" (UniqueName: \"kubernetes.io/projected/84f22c80-20e5-4729-bb3b-564ad8712a74-kube-api-access-v994q\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.359346 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-proxy-ca-bundles\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.375095 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fp6bt"] Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.380503 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.383199 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.384416 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fp6bt"] Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.461038 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-client-ca\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.461154 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84f22c80-20e5-4729-bb3b-564ad8712a74-serving-cert\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.461228 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-config\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.461257 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v994q\" (UniqueName: \"kubernetes.io/projected/84f22c80-20e5-4729-bb3b-564ad8712a74-kube-api-access-v994q\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.461291 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-proxy-ca-bundles\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.462898 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-proxy-ca-bundles\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.463092 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-config\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.463425 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/84f22c80-20e5-4729-bb3b-564ad8712a74-client-ca\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.471218 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/84f22c80-20e5-4729-bb3b-564ad8712a74-serving-cert\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.483154 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v994q\" (UniqueName: \"kubernetes.io/projected/84f22c80-20e5-4729-bb3b-564ad8712a74-kube-api-access-v994q\") pod \"controller-manager-6b68c66f9c-4fwcq\" (UID: \"84f22c80-20e5-4729-bb3b-564ad8712a74\") " pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.537977 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.562484 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7tcq\" (UniqueName: \"kubernetes.io/projected/5b88ba92-b8b0-4949-9885-c22425ad27be-kube-api-access-w7tcq\") pod \"redhat-operators-fp6bt\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.562631 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-catalog-content\") pod \"redhat-operators-fp6bt\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.562699 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-utilities\") pod \"redhat-operators-fp6bt\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.664078 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-utilities\") pod \"redhat-operators-fp6bt\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.664793 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7tcq\" (UniqueName: \"kubernetes.io/projected/5b88ba92-b8b0-4949-9885-c22425ad27be-kube-api-access-w7tcq\") pod \"redhat-operators-fp6bt\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.664853 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-catalog-content\") pod \"redhat-operators-fp6bt\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.665162 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-catalog-content\") pod \"redhat-operators-fp6bt\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.664834 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-utilities\") pod \"redhat-operators-fp6bt\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.690285 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7tcq\" (UniqueName: \"kubernetes.io/projected/5b88ba92-b8b0-4949-9885-c22425ad27be-kube-api-access-w7tcq\") pod \"redhat-operators-fp6bt\" (UID: 
\"5b88ba92-b8b0-4949-9885-c22425ad27be\") " pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.707953 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.720456 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-484gz" Jan 28 15:22:53 crc kubenswrapper[4959]: I0128 15:22:53.975792 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq"] Jan 28 15:22:53 crc kubenswrapper[4959]: W0128 15:22:53.982349 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84f22c80_20e5_4729_bb3b_564ad8712a74.slice/crio-a81962ef579b8c460b979d58e6364db328fa28e87a6fa17274ad69f4cf1cee7d WatchSource:0}: Error finding container a81962ef579b8c460b979d58e6364db328fa28e87a6fa17274ad69f4cf1cee7d: Status 404 returned error can't find the container with id a81962ef579b8c460b979d58e6364db328fa28e87a6fa17274ad69f4cf1cee7d Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.151982 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fp6bt"] Jan 28 15:22:54 crc kubenswrapper[4959]: W0128 15:22:54.162961 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b88ba92_b8b0_4949_9885_c22425ad27be.slice/crio-e27bb16abab3ea418f7db47d8393e8bcc6ac25b7d2b1f23d062c8eca0c9d45ce WatchSource:0}: Error finding container e27bb16abab3ea418f7db47d8393e8bcc6ac25b7d2b1f23d062c8eca0c9d45ce: Status 404 returned error can't find the container with id e27bb16abab3ea418f7db47d8393e8bcc6ac25b7d2b1f23d062c8eca0c9d45ce Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.597671 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="288654ae-ff9c-4ab8-999a-29ca0266da2a" path="/var/lib/kubelet/pods/288654ae-ff9c-4ab8-999a-29ca0266da2a/volumes" Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.598673 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d4611de-0934-450c-a51e-67298e455900" path="/var/lib/kubelet/pods/2d4611de-0934-450c-a51e-67298e455900/volumes" Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.599451 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="330ac85e-b20b-405b-9d27-351f152a00a9" path="/var/lib/kubelet/pods/330ac85e-b20b-405b-9d27-351f152a00a9/volumes" Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.601907 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="575b26ae-87aa-469e-9bd9-1b4384d80093" path="/var/lib/kubelet/pods/575b26ae-87aa-469e-9bd9-1b4384d80093/volumes" Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.602729 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b46d2a5-2d15-4841-97a6-b3768e4df1d4" path="/var/lib/kubelet/pods/5b46d2a5-2d15-4841-97a6-b3768e4df1d4/volumes" Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.604291 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9058c198-cfe2-496a-b045-d3650a0a36bf" path="/var/lib/kubelet/pods/9058c198-cfe2-496a-b045-d3650a0a36bf/volumes" Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.720593 4959 generic.go:334] "Generic (PLEG): container finished" 
podID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerID="a1522d18a37a405ce06effb85c30065618ffda51d08bf160be2e97fba025d388" exitCode=0 Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.720712 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fp6bt" event={"ID":"5b88ba92-b8b0-4949-9885-c22425ad27be","Type":"ContainerDied","Data":"a1522d18a37a405ce06effb85c30065618ffda51d08bf160be2e97fba025d388"} Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.721205 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fp6bt" event={"ID":"5b88ba92-b8b0-4949-9885-c22425ad27be","Type":"ContainerStarted","Data":"e27bb16abab3ea418f7db47d8393e8bcc6ac25b7d2b1f23d062c8eca0c9d45ce"} Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.722905 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" event={"ID":"84f22c80-20e5-4729-bb3b-564ad8712a74","Type":"ContainerStarted","Data":"951124b4d1683171e575a75de56dbfd20241a9b317de4b81e9c9e14c47acd034"} Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.722971 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" event={"ID":"84f22c80-20e5-4729-bb3b-564ad8712a74","Type":"ContainerStarted","Data":"a81962ef579b8c460b979d58e6364db328fa28e87a6fa17274ad69f4cf1cee7d"} Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.723145 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.731374 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" Jan 28 15:22:54 crc kubenswrapper[4959]: I0128 15:22:54.764084 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6b68c66f9c-4fwcq" podStartSLOduration=3.764053349 podStartE2EDuration="3.764053349s" podCreationTimestamp="2026-01-28 15:22:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:22:54.761726257 +0000 UTC m=+358.207632650" watchObservedRunningTime="2026-01-28 15:22:54.764053349 +0000 UTC m=+358.209959732" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.161932 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lzhkf"] Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.163201 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.166729 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.177605 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lzhkf"] Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.294155 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5185c08f-f7c9-4db8-a958-8a57a202824c-utilities\") pod \"certified-operators-lzhkf\" (UID: \"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.294368 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5185c08f-f7c9-4db8-a958-8a57a202824c-catalog-content\") pod \"certified-operators-lzhkf\" (UID: \"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.294439 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brm9q\" (UniqueName: \"kubernetes.io/projected/5185c08f-f7c9-4db8-a958-8a57a202824c-kube-api-access-brm9q\") pod \"certified-operators-lzhkf\" (UID: \"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.395996 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5185c08f-f7c9-4db8-a958-8a57a202824c-catalog-content\") pod \"certified-operators-lzhkf\" (UID: \"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.396074 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brm9q\" (UniqueName: \"kubernetes.io/projected/5185c08f-f7c9-4db8-a958-8a57a202824c-kube-api-access-brm9q\") pod \"certified-operators-lzhkf\" (UID: \"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.396165 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5185c08f-f7c9-4db8-a958-8a57a202824c-utilities\") pod \"certified-operators-lzhkf\" (UID: \"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.396640 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5185c08f-f7c9-4db8-a958-8a57a202824c-catalog-content\") pod \"certified-operators-lzhkf\" (UID: \"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.396730 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5185c08f-f7c9-4db8-a958-8a57a202824c-utilities\") pod \"certified-operators-lzhkf\" (UID: 
\"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.418833 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brm9q\" (UniqueName: \"kubernetes.io/projected/5185c08f-f7c9-4db8-a958-8a57a202824c-kube-api-access-brm9q\") pod \"certified-operators-lzhkf\" (UID: \"5185c08f-f7c9-4db8-a958-8a57a202824c\") " pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.491430 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.732769 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fp6bt" event={"ID":"5b88ba92-b8b0-4949-9885-c22425ad27be","Type":"ContainerStarted","Data":"d8f3b83f4780b51907d71c992da67a414034ac4926c619784c6ff8849f0f7919"} Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.763877 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r7nlb"] Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.765793 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.769153 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.780344 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r7nlb"] Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.903828 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3fe0784-f751-4e36-a8ce-804e95f72f12-catalog-content\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.903925 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3fe0784-f751-4e36-a8ce-804e95f72f12-utilities\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.904092 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99948\" (UniqueName: \"kubernetes.io/projected/d3fe0784-f751-4e36-a8ce-804e95f72f12-kube-api-access-99948\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:55 crc kubenswrapper[4959]: I0128 15:22:55.944653 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lzhkf"] Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.005908 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99948\" (UniqueName: \"kubernetes.io/projected/d3fe0784-f751-4e36-a8ce-804e95f72f12-kube-api-access-99948\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " 
pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.005994 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3fe0784-f751-4e36-a8ce-804e95f72f12-catalog-content\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.006025 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3fe0784-f751-4e36-a8ce-804e95f72f12-utilities\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.006683 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3fe0784-f751-4e36-a8ce-804e95f72f12-catalog-content\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.006777 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3fe0784-f751-4e36-a8ce-804e95f72f12-utilities\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.033148 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99948\" (UniqueName: \"kubernetes.io/projected/d3fe0784-f751-4e36-a8ce-804e95f72f12-kube-api-access-99948\") pod \"community-operators-r7nlb\" (UID: \"d3fe0784-f751-4e36-a8ce-804e95f72f12\") " pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.087883 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.540655 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r7nlb"] Jan 28 15:22:56 crc kubenswrapper[4959]: W0128 15:22:56.587463 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3fe0784_f751_4e36_a8ce_804e95f72f12.slice/crio-415ae076174e53dd0aef09af9353e8e2685d3be9e3e46d5d60c90cbe492ef84c WatchSource:0}: Error finding container 415ae076174e53dd0aef09af9353e8e2685d3be9e3e46d5d60c90cbe492ef84c: Status 404 returned error can't find the container with id 415ae076174e53dd0aef09af9353e8e2685d3be9e3e46d5d60c90cbe492ef84c Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.741419 4959 generic.go:334] "Generic (PLEG): container finished" podID="5185c08f-f7c9-4db8-a958-8a57a202824c" containerID="f60eec06d4824e27020aa12a6a46ff014247fcc35f7d709dc21b91b282cf0bb5" exitCode=0 Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.741627 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lzhkf" event={"ID":"5185c08f-f7c9-4db8-a958-8a57a202824c","Type":"ContainerDied","Data":"f60eec06d4824e27020aa12a6a46ff014247fcc35f7d709dc21b91b282cf0bb5"} Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.742266 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lzhkf" event={"ID":"5185c08f-f7c9-4db8-a958-8a57a202824c","Type":"ContainerStarted","Data":"b861c86ca41e2a0f8a18d4f8509e93e287b0de569478ef9d4037160d213ef113"} Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.752330 4959 generic.go:334] "Generic (PLEG): container finished" podID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerID="d8f3b83f4780b51907d71c992da67a414034ac4926c619784c6ff8849f0f7919" exitCode=0 Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.752443 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fp6bt" event={"ID":"5b88ba92-b8b0-4949-9885-c22425ad27be","Type":"ContainerDied","Data":"d8f3b83f4780b51907d71c992da67a414034ac4926c619784c6ff8849f0f7919"} Jan 28 15:22:56 crc kubenswrapper[4959]: I0128 15:22:56.756982 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7nlb" event={"ID":"d3fe0784-f751-4e36-a8ce-804e95f72f12","Type":"ContainerStarted","Data":"415ae076174e53dd0aef09af9353e8e2685d3be9e3e46d5d60c90cbe492ef84c"} Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.566403 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mf7qm"] Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.568404 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.571915 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.593737 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mf7qm"] Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.750131 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9302505-07e8-4fd5-b50f-fba85a6feb28-catalog-content\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.750567 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9302505-07e8-4fd5-b50f-fba85a6feb28-utilities\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.750622 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf5z6\" (UniqueName: \"kubernetes.io/projected/b9302505-07e8-4fd5-b50f-fba85a6feb28-kube-api-access-nf5z6\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.768745 4959 generic.go:334] "Generic (PLEG): container finished" podID="d3fe0784-f751-4e36-a8ce-804e95f72f12" containerID="e80212a63fe42c50027005a09f0736041365f082a28438daccdcc9e25145fba2" exitCode=0 Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.768807 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7nlb" event={"ID":"d3fe0784-f751-4e36-a8ce-804e95f72f12","Type":"ContainerDied","Data":"e80212a63fe42c50027005a09f0736041365f082a28438daccdcc9e25145fba2"} Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.852760 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9302505-07e8-4fd5-b50f-fba85a6feb28-catalog-content\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.852888 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9302505-07e8-4fd5-b50f-fba85a6feb28-utilities\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.852963 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf5z6\" (UniqueName: \"kubernetes.io/projected/b9302505-07e8-4fd5-b50f-fba85a6feb28-kube-api-access-nf5z6\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.853520 4959 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9302505-07e8-4fd5-b50f-fba85a6feb28-utilities\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.853580 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9302505-07e8-4fd5-b50f-fba85a6feb28-catalog-content\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.881205 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf5z6\" (UniqueName: \"kubernetes.io/projected/b9302505-07e8-4fd5-b50f-fba85a6feb28-kube-api-access-nf5z6\") pod \"redhat-marketplace-mf7qm\" (UID: \"b9302505-07e8-4fd5-b50f-fba85a6feb28\") " pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:57 crc kubenswrapper[4959]: I0128 15:22:57.894318 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:22:58 crc kubenswrapper[4959]: I0128 15:22:58.689628 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:22:58 crc kubenswrapper[4959]: I0128 15:22:58.690226 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:22:58 crc kubenswrapper[4959]: I0128 15:22:58.779004 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fp6bt" event={"ID":"5b88ba92-b8b0-4949-9885-c22425ad27be","Type":"ContainerStarted","Data":"41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72"} Jan 28 15:22:58 crc kubenswrapper[4959]: I0128 15:22:58.806916 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fp6bt" podStartSLOduration=2.186600066 podStartE2EDuration="5.806889349s" podCreationTimestamp="2026-01-28 15:22:53 +0000 UTC" firstStartedPulling="2026-01-28 15:22:54.724986523 +0000 UTC m=+358.170892906" lastFinishedPulling="2026-01-28 15:22:58.345275816 +0000 UTC m=+361.791182189" observedRunningTime="2026-01-28 15:22:58.803591883 +0000 UTC m=+362.249498286" watchObservedRunningTime="2026-01-28 15:22:58.806889349 +0000 UTC m=+362.252795732" Jan 28 15:22:58 crc kubenswrapper[4959]: I0128 15:22:58.817214 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mf7qm"] Jan 28 15:22:59 crc kubenswrapper[4959]: I0128 15:22:59.787060 4959 generic.go:334] "Generic (PLEG): container finished" podID="5185c08f-f7c9-4db8-a958-8a57a202824c" containerID="1a562e522cc3016c0dba9ae7d7cd60efc86fc2344818ea5c4caaf3844d076426" exitCode=0 Jan 28 15:22:59 crc kubenswrapper[4959]: I0128 15:22:59.787163 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lzhkf" 
event={"ID":"5185c08f-f7c9-4db8-a958-8a57a202824c","Type":"ContainerDied","Data":"1a562e522cc3016c0dba9ae7d7cd60efc86fc2344818ea5c4caaf3844d076426"} Jan 28 15:22:59 crc kubenswrapper[4959]: I0128 15:22:59.790605 4959 generic.go:334] "Generic (PLEG): container finished" podID="b9302505-07e8-4fd5-b50f-fba85a6feb28" containerID="4f0a9307907783df04d807bc8963c6e1d9beb4197e8bd648eb7e1ecacf740d68" exitCode=0 Jan 28 15:22:59 crc kubenswrapper[4959]: I0128 15:22:59.790661 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf7qm" event={"ID":"b9302505-07e8-4fd5-b50f-fba85a6feb28","Type":"ContainerDied","Data":"4f0a9307907783df04d807bc8963c6e1d9beb4197e8bd648eb7e1ecacf740d68"} Jan 28 15:22:59 crc kubenswrapper[4959]: I0128 15:22:59.790684 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf7qm" event={"ID":"b9302505-07e8-4fd5-b50f-fba85a6feb28","Type":"ContainerStarted","Data":"ae95edf601fea2b6ceac5582aa43efcfcf96c46a0e8b919b6a55bf8c786c720b"} Jan 28 15:22:59 crc kubenswrapper[4959]: I0128 15:22:59.793233 4959 generic.go:334] "Generic (PLEG): container finished" podID="d3fe0784-f751-4e36-a8ce-804e95f72f12" containerID="0e8f1a64ba5a35cb80768fe0b4a5538b7ad4b3ee3c080e43190e6a4c043ced93" exitCode=0 Jan 28 15:22:59 crc kubenswrapper[4959]: I0128 15:22:59.794578 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7nlb" event={"ID":"d3fe0784-f751-4e36-a8ce-804e95f72f12","Type":"ContainerDied","Data":"0e8f1a64ba5a35cb80768fe0b4a5538b7ad4b3ee3c080e43190e6a4c043ced93"} Jan 28 15:23:00 crc kubenswrapper[4959]: I0128 15:23:00.810274 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lzhkf" event={"ID":"5185c08f-f7c9-4db8-a958-8a57a202824c","Type":"ContainerStarted","Data":"6913b73f2d918517c419fb535c77d82e1dff08d89d0f419cea4a969ea4b6da36"} Jan 28 15:23:00 crc kubenswrapper[4959]: I0128 15:23:00.813190 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf7qm" event={"ID":"b9302505-07e8-4fd5-b50f-fba85a6feb28","Type":"ContainerStarted","Data":"ec1486dee65bbe2db9d5533fe258d286798baea38b35c878784c43375580a46f"} Jan 28 15:23:00 crc kubenswrapper[4959]: I0128 15:23:00.837387 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lzhkf" podStartSLOduration=2.253406982 podStartE2EDuration="5.837348241s" podCreationTimestamp="2026-01-28 15:22:55 +0000 UTC" firstStartedPulling="2026-01-28 15:22:56.747835935 +0000 UTC m=+360.193742358" lastFinishedPulling="2026-01-28 15:23:00.331777234 +0000 UTC m=+363.777683617" observedRunningTime="2026-01-28 15:23:00.833972273 +0000 UTC m=+364.279878676" watchObservedRunningTime="2026-01-28 15:23:00.837348241 +0000 UTC m=+364.283254624" Jan 28 15:23:00 crc kubenswrapper[4959]: I0128 15:23:00.842719 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7nlb" event={"ID":"d3fe0784-f751-4e36-a8ce-804e95f72f12","Type":"ContainerStarted","Data":"3b5e95fc7837534111b2cfa8d9539f1e7c909622007b1412ff439b738f3990eb"} Jan 28 15:23:00 crc kubenswrapper[4959]: I0128 15:23:00.893418 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r7nlb" podStartSLOduration=3.9394954909999997 podStartE2EDuration="5.893398683s" podCreationTimestamp="2026-01-28 15:22:55 +0000 UTC" 
firstStartedPulling="2026-01-28 15:22:58.281025769 +0000 UTC m=+361.726932182" lastFinishedPulling="2026-01-28 15:23:00.234928991 +0000 UTC m=+363.680835374" observedRunningTime="2026-01-28 15:23:00.892465899 +0000 UTC m=+364.338372282" watchObservedRunningTime="2026-01-28 15:23:00.893398683 +0000 UTC m=+364.339305066" Jan 28 15:23:01 crc kubenswrapper[4959]: I0128 15:23:01.853084 4959 generic.go:334] "Generic (PLEG): container finished" podID="b9302505-07e8-4fd5-b50f-fba85a6feb28" containerID="ec1486dee65bbe2db9d5533fe258d286798baea38b35c878784c43375580a46f" exitCode=0 Jan 28 15:23:01 crc kubenswrapper[4959]: I0128 15:23:01.853315 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf7qm" event={"ID":"b9302505-07e8-4fd5-b50f-fba85a6feb28","Type":"ContainerDied","Data":"ec1486dee65bbe2db9d5533fe258d286798baea38b35c878784c43375580a46f"} Jan 28 15:23:03 crc kubenswrapper[4959]: I0128 15:23:03.708871 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:23:03 crc kubenswrapper[4959]: I0128 15:23:03.709846 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:23:03 crc kubenswrapper[4959]: I0128 15:23:03.868158 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mf7qm" event={"ID":"b9302505-07e8-4fd5-b50f-fba85a6feb28","Type":"ContainerStarted","Data":"c9d7658207e3823a5e816d845ed0412e8aa72226f920abdea3d915c8f54e9a23"} Jan 28 15:23:03 crc kubenswrapper[4959]: I0128 15:23:03.892525 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mf7qm" podStartSLOduration=4.386038221 podStartE2EDuration="6.892494634s" podCreationTimestamp="2026-01-28 15:22:57 +0000 UTC" firstStartedPulling="2026-01-28 15:22:59.791856536 +0000 UTC m=+363.237762919" lastFinishedPulling="2026-01-28 15:23:02.298312949 +0000 UTC m=+365.744219332" observedRunningTime="2026-01-28 15:23:03.887865692 +0000 UTC m=+367.333772095" watchObservedRunningTime="2026-01-28 15:23:03.892494634 +0000 UTC m=+367.338401017" Jan 28 15:23:04 crc kubenswrapper[4959]: I0128 15:23:04.759136 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fp6bt" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="registry-server" probeResult="failure" output=< Jan 28 15:23:04 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s Jan 28 15:23:04 crc kubenswrapper[4959]: > Jan 28 15:23:05 crc kubenswrapper[4959]: I0128 15:23:05.492216 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:23:05 crc kubenswrapper[4959]: I0128 15:23:05.492915 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:23:05 crc kubenswrapper[4959]: I0128 15:23:05.547428 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:23:05 crc kubenswrapper[4959]: I0128 15:23:05.934627 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lzhkf" Jan 28 15:23:06 crc kubenswrapper[4959]: I0128 15:23:06.087991 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:23:06 crc kubenswrapper[4959]: I0128 15:23:06.088055 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:23:06 crc kubenswrapper[4959]: I0128 15:23:06.125688 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:23:06 crc kubenswrapper[4959]: I0128 15:23:06.947936 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r7nlb" Jan 28 15:23:07 crc kubenswrapper[4959]: I0128 15:23:07.894543 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:23:07 crc kubenswrapper[4959]: I0128 15:23:07.894659 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:23:07 crc kubenswrapper[4959]: I0128 15:23:07.946785 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:23:11 crc kubenswrapper[4959]: I0128 15:23:11.001554 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-l4sqx" Jan 28 15:23:11 crc kubenswrapper[4959]: I0128 15:23:11.066099 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-l7lfr"] Jan 28 15:23:13 crc kubenswrapper[4959]: I0128 15:23:13.777078 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:23:13 crc kubenswrapper[4959]: I0128 15:23:13.833138 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:23:17 crc kubenswrapper[4959]: I0128 15:23:17.948218 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mf7qm" Jan 28 15:23:28 crc kubenswrapper[4959]: I0128 15:23:28.689294 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:23:28 crc kubenswrapper[4959]: I0128 15:23:28.690410 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.108839 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" podUID="c29792dd-faa1-4f0c-b405-e0de581ee26f" containerName="registry" containerID="cri-o://e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1" gracePeriod=30 Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.621732 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.693569 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"c29792dd-faa1-4f0c-b405-e0de581ee26f\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.693780 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-bound-sa-token\") pod \"c29792dd-faa1-4f0c-b405-e0de581ee26f\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.693885 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-trusted-ca\") pod \"c29792dd-faa1-4f0c-b405-e0de581ee26f\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.693946 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vszr\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-kube-api-access-2vszr\") pod \"c29792dd-faa1-4f0c-b405-e0de581ee26f\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.694059 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c29792dd-faa1-4f0c-b405-e0de581ee26f-installation-pull-secrets\") pod \"c29792dd-faa1-4f0c-b405-e0de581ee26f\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.694169 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-certificates\") pod \"c29792dd-faa1-4f0c-b405-e0de581ee26f\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.694262 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c29792dd-faa1-4f0c-b405-e0de581ee26f-ca-trust-extracted\") pod \"c29792dd-faa1-4f0c-b405-e0de581ee26f\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.694344 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-tls\") pod \"c29792dd-faa1-4f0c-b405-e0de581ee26f\" (UID: \"c29792dd-faa1-4f0c-b405-e0de581ee26f\") " Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.708195 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "c29792dd-faa1-4f0c-b405-e0de581ee26f" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.708263 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "c29792dd-faa1-4f0c-b405-e0de581ee26f" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.711757 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "c29792dd-faa1-4f0c-b405-e0de581ee26f" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.712403 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-kube-api-access-2vszr" (OuterVolumeSpecName: "kube-api-access-2vszr") pod "c29792dd-faa1-4f0c-b405-e0de581ee26f" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f"). InnerVolumeSpecName "kube-api-access-2vszr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.712932 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "c29792dd-faa1-4f0c-b405-e0de581ee26f" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.713605 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c29792dd-faa1-4f0c-b405-e0de581ee26f-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "c29792dd-faa1-4f0c-b405-e0de581ee26f" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.728614 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "c29792dd-faa1-4f0c-b405-e0de581ee26f" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.731417 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c29792dd-faa1-4f0c-b405-e0de581ee26f-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "c29792dd-faa1-4f0c-b405-e0de581ee26f" (UID: "c29792dd-faa1-4f0c-b405-e0de581ee26f"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.797023 4959 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c29792dd-faa1-4f0c-b405-e0de581ee26f-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.797063 4959 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.797074 4959 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c29792dd-faa1-4f0c-b405-e0de581ee26f-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.797085 4959 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.797096 4959 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.797121 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c29792dd-faa1-4f0c-b405-e0de581ee26f-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:23:36 crc kubenswrapper[4959]: I0128 15:23:36.797131 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vszr\" (UniqueName: \"kubernetes.io/projected/c29792dd-faa1-4f0c-b405-e0de581ee26f-kube-api-access-2vszr\") on node \"crc\" DevicePath \"\"" Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.128200 4959 generic.go:334] "Generic (PLEG): container finished" podID="c29792dd-faa1-4f0c-b405-e0de581ee26f" containerID="e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1" exitCode=0 Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.128270 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" event={"ID":"c29792dd-faa1-4f0c-b405-e0de581ee26f","Type":"ContainerDied","Data":"e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1"} Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.128304 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" event={"ID":"c29792dd-faa1-4f0c-b405-e0de581ee26f","Type":"ContainerDied","Data":"dd115c1dbb0fc44419a060935abf8be3bef37c45ad2f4cf33659375cde8956ea"} Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.128323 4959 scope.go:117] "RemoveContainer" containerID="e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1" Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.128360 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-l7lfr" Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.150159 4959 scope.go:117] "RemoveContainer" containerID="e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1" Jan 28 15:23:37 crc kubenswrapper[4959]: E0128 15:23:37.150824 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1\": container with ID starting with e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1 not found: ID does not exist" containerID="e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1" Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.150893 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1"} err="failed to get container status \"e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1\": rpc error: code = NotFound desc = could not find container \"e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1\": container with ID starting with e01bb70cdbe981d9b3a75a7522e9d18adfa9721935485cfd1b4af411615b21b1 not found: ID does not exist" Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.174730 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-l7lfr"] Jan 28 15:23:37 crc kubenswrapper[4959]: I0128 15:23:37.180325 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-l7lfr"] Jan 28 15:23:38 crc kubenswrapper[4959]: I0128 15:23:38.595986 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c29792dd-faa1-4f0c-b405-e0de581ee26f" path="/var/lib/kubelet/pods/c29792dd-faa1-4f0c-b405-e0de581ee26f/volumes" Jan 28 15:23:58 crc kubenswrapper[4959]: I0128 15:23:58.689556 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:23:58 crc kubenswrapper[4959]: I0128 15:23:58.690348 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:23:58 crc kubenswrapper[4959]: I0128 15:23:58.690408 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:23:58 crc kubenswrapper[4959]: I0128 15:23:58.691154 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2721ee3c317d213a4abd4520e3756a7317e857dd25f041e3dd5a379f57b76dad"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:23:58 crc kubenswrapper[4959]: I0128 15:23:58.691205 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" 
podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://2721ee3c317d213a4abd4520e3756a7317e857dd25f041e3dd5a379f57b76dad" gracePeriod=600 Jan 28 15:23:59 crc kubenswrapper[4959]: I0128 15:23:59.287753 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="2721ee3c317d213a4abd4520e3756a7317e857dd25f041e3dd5a379f57b76dad" exitCode=0 Jan 28 15:23:59 crc kubenswrapper[4959]: I0128 15:23:59.287827 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"2721ee3c317d213a4abd4520e3756a7317e857dd25f041e3dd5a379f57b76dad"} Jan 28 15:23:59 crc kubenswrapper[4959]: I0128 15:23:59.288353 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"e5d004ca357a0dc2c6c28e91b74898a337366c10d600aff9af5ea78d94bfa6c7"} Jan 28 15:23:59 crc kubenswrapper[4959]: I0128 15:23:59.288396 4959 scope.go:117] "RemoveContainer" containerID="feb9c22642900a13d8c10638470214256f41fa4fa1c2864ee43a8ac26487a259" Jan 28 15:24:00 crc kubenswrapper[4959]: I0128 15:24:00.938949 4959 scope.go:117] "RemoveContainer" containerID="80af1fc06c48bd10ea7a6751461969b200aaab26d773ce1bdc68686e8b75c99e" Jan 28 15:24:00 crc kubenswrapper[4959]: I0128 15:24:00.966874 4959 scope.go:117] "RemoveContainer" containerID="35e91256cfe5aeb2b01b6061f1e99e66ab5480ec661fb428a41dd2de2088d881" Jan 28 15:24:00 crc kubenswrapper[4959]: I0128 15:24:00.987134 4959 scope.go:117] "RemoveContainer" containerID="0a4a3366fa676437b55d51b04f313d77b927ded2aa45bfa80c0ad54e59d37a98" Jan 28 15:24:01 crc kubenswrapper[4959]: I0128 15:24:01.007310 4959 scope.go:117] "RemoveContainer" containerID="edf1735470ec9a45b858bf7b706b70f6c7d49a5b647e56c74e7c7ef7ec16883e" Jan 28 15:24:01 crc kubenswrapper[4959]: I0128 15:24:01.028876 4959 scope.go:117] "RemoveContainer" containerID="e7ccd3b12954b9fa54cdd3d001b06c038e8fb06a6ae1fa2d34f1382d4c92392e" Jan 28 15:24:01 crc kubenswrapper[4959]: I0128 15:24:01.052262 4959 scope.go:117] "RemoveContainer" containerID="4030070ea33f480036f263b031e9f2667f8212d2b8acc9c7aff5b6a28ca15688" Jan 28 15:26:28 crc kubenswrapper[4959]: I0128 15:26:28.689233 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:26:28 crc kubenswrapper[4959]: I0128 15:26:28.690040 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:26:58 crc kubenswrapper[4959]: I0128 15:26:58.689246 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:26:58 crc kubenswrapper[4959]: I0128 15:26:58.690133 4959 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:27:01 crc kubenswrapper[4959]: I0128 15:27:01.128217 4959 scope.go:117] "RemoveContainer" containerID="2a6c7a509f4ae08fdd998f4e592f3898e64998b62897ec24c06a6726cf35a0e9" Jan 28 15:27:01 crc kubenswrapper[4959]: I0128 15:27:01.158669 4959 scope.go:117] "RemoveContainer" containerID="e1eaa360e69834375a5b95d21ed0ed563652d892feb84bd35e543ef4d066ab52" Jan 28 15:27:28 crc kubenswrapper[4959]: I0128 15:27:28.689605 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:27:28 crc kubenswrapper[4959]: I0128 15:27:28.690492 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:27:28 crc kubenswrapper[4959]: I0128 15:27:28.690551 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:27:28 crc kubenswrapper[4959]: I0128 15:27:28.691462 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e5d004ca357a0dc2c6c28e91b74898a337366c10d600aff9af5ea78d94bfa6c7"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:27:28 crc kubenswrapper[4959]: I0128 15:27:28.691516 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://e5d004ca357a0dc2c6c28e91b74898a337366c10d600aff9af5ea78d94bfa6c7" gracePeriod=600 Jan 28 15:27:29 crc kubenswrapper[4959]: I0128 15:27:29.534832 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="e5d004ca357a0dc2c6c28e91b74898a337366c10d600aff9af5ea78d94bfa6c7" exitCode=0 Jan 28 15:27:29 crc kubenswrapper[4959]: I0128 15:27:29.534948 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"e5d004ca357a0dc2c6c28e91b74898a337366c10d600aff9af5ea78d94bfa6c7"} Jan 28 15:27:29 crc kubenswrapper[4959]: I0128 15:27:29.535478 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"5190babcd72aa8d406516ffe324e9eaebbd4bede0a5bd30239f36eb74204fed6"} Jan 28 15:27:29 crc kubenswrapper[4959]: I0128 15:27:29.535523 4959 scope.go:117] "RemoveContainer" 
containerID="2721ee3c317d213a4abd4520e3756a7317e857dd25f041e3dd5a379f57b76dad" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.650566 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq"] Jan 28 15:28:35 crc kubenswrapper[4959]: E0128 15:28:35.651659 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29792dd-faa1-4f0c-b405-e0de581ee26f" containerName="registry" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.651678 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29792dd-faa1-4f0c-b405-e0de581ee26f" containerName="registry" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.651820 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="c29792dd-faa1-4f0c-b405-e0de581ee26f" containerName="registry" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.652454 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.658194 4959 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-jpg5c" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.658274 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.658345 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.659889 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq"] Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.670044 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-mhfkm"] Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.670845 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-mhfkm" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.673099 4959 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-mc4l5" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.694571 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-mhfkm"] Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.704864 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4m52p"] Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.705627 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.710921 4959 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-bt4tw" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.736369 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d494g\" (UniqueName: \"kubernetes.io/projected/11a69736-0d82-4d12-8618-7dacd4800aac-kube-api-access-d494g\") pod \"cert-manager-webhook-687f57d79b-4m52p\" (UID: \"11a69736-0d82-4d12-8618-7dacd4800aac\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.736438 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpb9c\" (UniqueName: \"kubernetes.io/projected/f8b4d3bd-6f12-4998-9613-1e9e1e092cbe-kube-api-access-xpb9c\") pod \"cert-manager-858654f9db-mhfkm\" (UID: \"f8b4d3bd-6f12-4998-9613-1e9e1e092cbe\") " pod="cert-manager/cert-manager-858654f9db-mhfkm" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.736518 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w54mx\" (UniqueName: \"kubernetes.io/projected/27a0974f-8766-47d5-aea4-65d8abada350-kube-api-access-w54mx\") pod \"cert-manager-cainjector-cf98fcc89-5xkqq\" (UID: \"27a0974f-8766-47d5-aea4-65d8abada350\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.738512 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4m52p"] Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.837396 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpb9c\" (UniqueName: \"kubernetes.io/projected/f8b4d3bd-6f12-4998-9613-1e9e1e092cbe-kube-api-access-xpb9c\") pod \"cert-manager-858654f9db-mhfkm\" (UID: \"f8b4d3bd-6f12-4998-9613-1e9e1e092cbe\") " pod="cert-manager/cert-manager-858654f9db-mhfkm" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.837453 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w54mx\" (UniqueName: \"kubernetes.io/projected/27a0974f-8766-47d5-aea4-65d8abada350-kube-api-access-w54mx\") pod \"cert-manager-cainjector-cf98fcc89-5xkqq\" (UID: \"27a0974f-8766-47d5-aea4-65d8abada350\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.837545 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d494g\" (UniqueName: \"kubernetes.io/projected/11a69736-0d82-4d12-8618-7dacd4800aac-kube-api-access-d494g\") pod \"cert-manager-webhook-687f57d79b-4m52p\" (UID: \"11a69736-0d82-4d12-8618-7dacd4800aac\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.859458 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d494g\" (UniqueName: \"kubernetes.io/projected/11a69736-0d82-4d12-8618-7dacd4800aac-kube-api-access-d494g\") pod \"cert-manager-webhook-687f57d79b-4m52p\" (UID: \"11a69736-0d82-4d12-8618-7dacd4800aac\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.859500 4959 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-w54mx\" (UniqueName: \"kubernetes.io/projected/27a0974f-8766-47d5-aea4-65d8abada350-kube-api-access-w54mx\") pod \"cert-manager-cainjector-cf98fcc89-5xkqq\" (UID: \"27a0974f-8766-47d5-aea4-65d8abada350\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.862825 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpb9c\" (UniqueName: \"kubernetes.io/projected/f8b4d3bd-6f12-4998-9613-1e9e1e092cbe-kube-api-access-xpb9c\") pod \"cert-manager-858654f9db-mhfkm\" (UID: \"f8b4d3bd-6f12-4998-9613-1e9e1e092cbe\") " pod="cert-manager/cert-manager-858654f9db-mhfkm" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.974862 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq" Jan 28 15:28:35 crc kubenswrapper[4959]: I0128 15:28:35.986725 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-mhfkm" Jan 28 15:28:36 crc kubenswrapper[4959]: I0128 15:28:36.023829 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" Jan 28 15:28:36 crc kubenswrapper[4959]: I0128 15:28:36.226037 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-mhfkm"] Jan 28 15:28:36 crc kubenswrapper[4959]: I0128 15:28:36.243042 4959 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 15:28:36 crc kubenswrapper[4959]: I0128 15:28:36.287342 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq"] Jan 28 15:28:36 crc kubenswrapper[4959]: W0128 15:28:36.297780 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27a0974f_8766_47d5_aea4_65d8abada350.slice/crio-6ad31c739319fc2efd165f7eef8d44bfd82fe098ff352e0c068cbcd2d3ab9d2d WatchSource:0}: Error finding container 6ad31c739319fc2efd165f7eef8d44bfd82fe098ff352e0c068cbcd2d3ab9d2d: Status 404 returned error can't find the container with id 6ad31c739319fc2efd165f7eef8d44bfd82fe098ff352e0c068cbcd2d3ab9d2d Jan 28 15:28:36 crc kubenswrapper[4959]: I0128 15:28:36.348426 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4m52p"] Jan 28 15:28:36 crc kubenswrapper[4959]: W0128 15:28:36.355762 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11a69736_0d82_4d12_8618_7dacd4800aac.slice/crio-72e6fbe4a0a23bbdb39e3610da773923d1ba6668ee546b34bfd7dfddab347c7b WatchSource:0}: Error finding container 72e6fbe4a0a23bbdb39e3610da773923d1ba6668ee546b34bfd7dfddab347c7b: Status 404 returned error can't find the container with id 72e6fbe4a0a23bbdb39e3610da773923d1ba6668ee546b34bfd7dfddab347c7b Jan 28 15:28:36 crc kubenswrapper[4959]: I0128 15:28:36.995663 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" event={"ID":"11a69736-0d82-4d12-8618-7dacd4800aac","Type":"ContainerStarted","Data":"72e6fbe4a0a23bbdb39e3610da773923d1ba6668ee546b34bfd7dfddab347c7b"} Jan 28 15:28:36 crc kubenswrapper[4959]: I0128 15:28:36.996667 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-mhfkm" 
event={"ID":"f8b4d3bd-6f12-4998-9613-1e9e1e092cbe","Type":"ContainerStarted","Data":"0ec2f6441aa427bef0410e8fa7322e03d71e18b612432b236eb75cfd52d77c9d"} Jan 28 15:28:36 crc kubenswrapper[4959]: I0128 15:28:36.997338 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq" event={"ID":"27a0974f-8766-47d5-aea4-65d8abada350","Type":"ContainerStarted","Data":"6ad31c739319fc2efd165f7eef8d44bfd82fe098ff352e0c068cbcd2d3ab9d2d"} Jan 28 15:28:43 crc kubenswrapper[4959]: I0128 15:28:43.032553 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq" event={"ID":"27a0974f-8766-47d5-aea4-65d8abada350","Type":"ContainerStarted","Data":"a705c989d0ebabedb62a8880492f0b55f327e8dd5a08ac4ac5ea4e703a50e492"} Jan 28 15:28:43 crc kubenswrapper[4959]: I0128 15:28:43.035132 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" event={"ID":"11a69736-0d82-4d12-8618-7dacd4800aac","Type":"ContainerStarted","Data":"e4e87120b728aa6c9cd51a209bedab5537abd28d6ac5ad778d1595dedf06efaf"} Jan 28 15:28:43 crc kubenswrapper[4959]: I0128 15:28:43.035270 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" Jan 28 15:28:43 crc kubenswrapper[4959]: I0128 15:28:43.036637 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-mhfkm" event={"ID":"f8b4d3bd-6f12-4998-9613-1e9e1e092cbe","Type":"ContainerStarted","Data":"2073201f19fcf6bef917bb6afe854c5f8b5ce068d64b305aa2937562a88e99af"} Jan 28 15:28:43 crc kubenswrapper[4959]: I0128 15:28:43.050753 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-5xkqq" podStartSLOduration=2.282825024 podStartE2EDuration="8.050725078s" podCreationTimestamp="2026-01-28 15:28:35 +0000 UTC" firstStartedPulling="2026-01-28 15:28:36.300143009 +0000 UTC m=+699.746049392" lastFinishedPulling="2026-01-28 15:28:42.068043073 +0000 UTC m=+705.513949446" observedRunningTime="2026-01-28 15:28:43.04716055 +0000 UTC m=+706.493066933" watchObservedRunningTime="2026-01-28 15:28:43.050725078 +0000 UTC m=+706.496631461" Jan 28 15:28:43 crc kubenswrapper[4959]: I0128 15:28:43.076498 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" podStartSLOduration=2.355591643 podStartE2EDuration="8.076471477s" podCreationTimestamp="2026-01-28 15:28:35 +0000 UTC" firstStartedPulling="2026-01-28 15:28:36.359170762 +0000 UTC m=+699.805077145" lastFinishedPulling="2026-01-28 15:28:42.080050596 +0000 UTC m=+705.525956979" observedRunningTime="2026-01-28 15:28:43.076444416 +0000 UTC m=+706.522350819" watchObservedRunningTime="2026-01-28 15:28:43.076471477 +0000 UTC m=+706.522377880" Jan 28 15:28:43 crc kubenswrapper[4959]: I0128 15:28:43.104726 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-mhfkm" podStartSLOduration=2.2704148699999998 podStartE2EDuration="8.104701017s" podCreationTimestamp="2026-01-28 15:28:35 +0000 UTC" firstStartedPulling="2026-01-28 15:28:36.242811897 +0000 UTC m=+699.688718280" lastFinishedPulling="2026-01-28 15:28:42.077098044 +0000 UTC m=+705.523004427" observedRunningTime="2026-01-28 15:28:43.103223161 +0000 UTC m=+706.549129604" watchObservedRunningTime="2026-01-28 15:28:43.104701017 +0000 UTC m=+706.550607400" Jan 28 
15:28:51 crc kubenswrapper[4959]: I0128 15:28:51.027658 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-4m52p" Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.043606 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mvzjl"] Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.044715 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovn-controller" containerID="cri-o://cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d" gracePeriod=30 Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.044801 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="nbdb" containerID="cri-o://ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71" gracePeriod=30 Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.044897 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="sbdb" containerID="cri-o://5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3" gracePeriod=30 Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.044885 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovn-acl-logging" containerID="cri-o://c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856" gracePeriod=30 Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.044949 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kube-rbac-proxy-node" containerID="cri-o://247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf" gracePeriod=30 Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.045021 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5" gracePeriod=30 Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.045069 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="northd" containerID="cri-o://0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4" gracePeriod=30 Jan 28 15:28:56 crc kubenswrapper[4959]: I0128 15:28:56.082338 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" containerID="cri-o://d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" gracePeriod=30 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.003866 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/3.log" Jan 28 15:28:57 crc 
kubenswrapper[4959]: I0128 15:28:57.007257 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovn-acl-logging/0.log" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.007892 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovn-controller/0.log" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.008667 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059613 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-netns\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059685 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-openvswitch\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059712 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-bin\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059738 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-systemd\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059763 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-kubelet\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059789 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-etc-openvswitch\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059806 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-log-socket\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059833 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") " Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059870 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-script-lib\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059910 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-var-lib-openvswitch\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059935 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-ovn\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059956 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-netd\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.059985 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-env-overrides\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060009 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-ovn-kubernetes\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060042 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-node-log\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060067 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-systemd-units\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060181 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovn-node-metrics-cert\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060243 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-config\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060271 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5t2sz\" (UniqueName: \"kubernetes.io/projected/1bad991a-9aad-4e7b-abdd-7d23124f60a8-kube-api-access-5t2sz\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060324 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-slash\") pod \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\" (UID: \"1bad991a-9aad-4e7b-abdd-7d23124f60a8\") "
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060400 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060462 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060570 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060735 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-node-log" (OuterVolumeSpecName: "node-log") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.060925 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.061025 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.061072 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.061174 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.061224 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.061262 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.062856 4959 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.062908 4959 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.062927 4959 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.062997 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-slash" (OuterVolumeSpecName: "host-slash") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.063035 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.063060 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.063081 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-log-socket" (OuterVolumeSpecName: "log-socket") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.063127 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.063640 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.073515 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bad991a-9aad-4e7b-abdd-7d23124f60a8-kube-api-access-5t2sz" (OuterVolumeSpecName: "kube-api-access-5t2sz") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "kube-api-access-5t2sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.074554 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080458 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bh658"] Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080773 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="sbdb" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080796 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="sbdb" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080808 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="northd" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080816 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="northd" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080828 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080837 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080848 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080855 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080866 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080873 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080883 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kubecfg-setup" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080890 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kubecfg-setup" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080901 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080911 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080919 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovn-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080926 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovn-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080941 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" 
containerName="kube-rbac-proxy-ovn-metrics" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080948 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080960 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kube-rbac-proxy-node" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080968 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kube-rbac-proxy-node" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080978 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="nbdb" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.080988 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="nbdb" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.080997 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovn-acl-logging" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081004 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovn-acl-logging" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081155 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081168 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081175 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="sbdb" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081188 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081198 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081207 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081214 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="nbdb" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081459 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovn-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081470 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="northd" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081481 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="kube-rbac-proxy-node" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081516 4959 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovn-acl-logging" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.081636 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081646 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.081760 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerName="ovnkube-controller" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.084354 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.089589 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "1bad991a-9aad-4e7b-abdd-7d23124f60a8" (UID: "1bad991a-9aad-4e7b-abdd-7d23124f60a8"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.128726 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovnkube-controller/3.log" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.130852 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovn-acl-logging/0.log" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131404 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mvzjl_1bad991a-9aad-4e7b-abdd-7d23124f60a8/ovn-controller/0.log" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131748 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" exitCode=0 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131778 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3" exitCode=0 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131789 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71" exitCode=0 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131801 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4" exitCode=0 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131809 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5" exitCode=0 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131818 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" 
containerID="247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf" exitCode=0 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131829 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856" exitCode=143 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131838 4959 generic.go:334] "Generic (PLEG): container finished" podID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" containerID="cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d" exitCode=143 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131848 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131842 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131896 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131920 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131931 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131942 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131953 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131968 4959 scope.go:117] "RemoveContainer" containerID="d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.131966 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132080 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132091 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132097 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132117 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132123 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132129 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132134 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132139 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132149 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132164 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132171 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132178 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132184 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132337 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132344 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132350 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132357 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132363 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132370 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132381 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132393 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132401 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132408 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132414 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132421 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132427 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132434 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132441 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132448 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132454 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132464 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mvzjl" event={"ID":"1bad991a-9aad-4e7b-abdd-7d23124f60a8","Type":"ContainerDied","Data":"c456faa3ab9a0ef3260a9e81acb4cc41cdb2f4be9096ea70b82eb409a019e4b2"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132474 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132481 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132488 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132495 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132501 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132509 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132516 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132523 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132529 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.132536 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.135731 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/2.log" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.144725 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/1.log" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.144791 4959 generic.go:334] "Generic (PLEG): container finished" podID="1c1dca0a-c782-43f9-9390-7dc9c5311b97" 
containerID="2ce5ba0a6fc65f5c4deba9c42f6a6ee7c031bd8945252d3ceb3501661be8235b" exitCode=2 Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.144836 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bbjnj" event={"ID":"1c1dca0a-c782-43f9-9390-7dc9c5311b97","Type":"ContainerDied","Data":"2ce5ba0a6fc65f5c4deba9c42f6a6ee7c031bd8945252d3ceb3501661be8235b"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.144868 4959 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d"} Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.145387 4959 scope.go:117] "RemoveContainer" containerID="2ce5ba0a6fc65f5c4deba9c42f6a6ee7c031bd8945252d3ceb3501661be8235b" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.145688 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-bbjnj_openshift-multus(1c1dca0a-c782-43f9-9390-7dc9c5311b97)\"" pod="openshift-multus/multus-bbjnj" podUID="1c1dca0a-c782-43f9-9390-7dc9c5311b97" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.157680 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.163831 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-cni-netd\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.163891 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnfqb\" (UniqueName: \"kubernetes.io/projected/feab8fcd-7eb5-410a-960c-5485ad3d6e96-kube-api-access-mnfqb\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.163947 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-etc-openvswitch\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.163971 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovn-node-metrics-cert\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.163992 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-cni-bin\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164010 4959 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-systemd\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164027 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164052 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovnkube-config\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164069 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-node-log\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164122 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-openvswitch\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164146 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-run-netns\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164161 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-slash\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164182 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-systemd-units\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164242 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-run-ovn-kubernetes\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164260 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-var-lib-openvswitch\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164275 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-kubelet\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164290 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-env-overrides\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164308 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovnkube-script-lib\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164327 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-log-socket\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164346 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-ovn\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164384 4959 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164395 4959 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164406 4959 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-node-log\") on node \"crc\" DevicePath \"\"" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164416 4959 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 28 15:28:57 crc 
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164429 4959 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164443 4959 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164454 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5t2sz\" (UniqueName: \"kubernetes.io/projected/1bad991a-9aad-4e7b-abdd-7d23124f60a8-kube-api-access-5t2sz\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164464 4959 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-slash\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164472 4959 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-run-netns\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164480 4959 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164489 4959 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-cni-bin\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164498 4959 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-run-systemd\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164506 4959 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-kubelet\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164515 4959 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164524 4959 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-log-socket\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164534 4959 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1bad991a-9aad-4e7b-abdd-7d23124f60a8-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.164543 4959 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1bad991a-9aad-4e7b-abdd-7d23124f60a8-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.199387 4959 scope.go:117] "RemoveContainer" containerID="5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.220167 4959 scope.go:117] "RemoveContainer" containerID="ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.221240 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mvzjl"]
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.224877 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mvzjl"]
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.234563 4959 scope.go:117] "RemoveContainer" containerID="0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.250323 4959 scope.go:117] "RemoveContainer" containerID="ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265598 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovnkube-script-lib\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265633 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-env-overrides\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265661 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-log-socket\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265681 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-ovn\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265701 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-cni-netd\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658"
Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265725 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnfqb\" (UniqueName: \"kubernetes.io/projected/feab8fcd-7eb5-410a-960c-5485ad3d6e96-kube-api-access-mnfqb\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658"
\"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-etc-openvswitch\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265765 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovn-node-metrics-cert\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265779 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-cni-bin\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265797 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-systemd\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265813 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265834 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovnkube-config\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265851 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-node-log\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265869 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-openvswitch\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265887 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-run-netns\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265909 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-slash\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265929 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-systemd-units\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265959 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-var-lib-openvswitch\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265977 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-run-ovn-kubernetes\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.265997 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-kubelet\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266074 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-kubelet\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266171 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-systemd\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266312 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-run-netns\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266383 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-node-log\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266430 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-openvswitch\") pod \"ovnkube-node-bh658\" (UID: 
\"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266481 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266656 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-env-overrides\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266701 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-cni-bin\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266725 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-systemd-units\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266747 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-slash\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266770 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-var-lib-openvswitch\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266795 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-run-ovn\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266827 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-log-socket\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266853 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-etc-openvswitch\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 
15:28:57.266862 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-run-ovn-kubernetes\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.266959 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/feab8fcd-7eb5-410a-960c-5485ad3d6e96-host-cni-netd\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.267283 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovnkube-config\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.267371 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovnkube-script-lib\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.268225 4959 scope.go:117] "RemoveContainer" containerID="247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.272374 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/feab8fcd-7eb5-410a-960c-5485ad3d6e96-ovn-node-metrics-cert\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.282781 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnfqb\" (UniqueName: \"kubernetes.io/projected/feab8fcd-7eb5-410a-960c-5485ad3d6e96-kube-api-access-mnfqb\") pod \"ovnkube-node-bh658\" (UID: \"feab8fcd-7eb5-410a-960c-5485ad3d6e96\") " pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.311487 4959 scope.go:117] "RemoveContainer" containerID="c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.325804 4959 scope.go:117] "RemoveContainer" containerID="cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.341404 4959 scope.go:117] "RemoveContainer" containerID="24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.354340 4959 scope.go:117] "RemoveContainer" containerID="d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.354738 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": container with ID starting with d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129 not found: ID does not exist" 
containerID="d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.354787 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} err="failed to get container status \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": rpc error: code = NotFound desc = could not find container \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": container with ID starting with d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.354822 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.355226 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": container with ID starting with d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7 not found: ID does not exist" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.355268 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} err="failed to get container status \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": rpc error: code = NotFound desc = could not find container \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": container with ID starting with d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.355298 4959 scope.go:117] "RemoveContainer" containerID="5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.355570 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": container with ID starting with 5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3 not found: ID does not exist" containerID="5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.355612 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} err="failed to get container status \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": rpc error: code = NotFound desc = could not find container \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": container with ID starting with 5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.355635 4959 scope.go:117] "RemoveContainer" containerID="ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.355963 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": container with ID starting with ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71 not found: ID does not exist" containerID="ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.355991 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} err="failed to get container status \"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": rpc error: code = NotFound desc = could not find container \"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": container with ID starting with ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.356005 4959 scope.go:117] "RemoveContainer" containerID="0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.356523 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": container with ID starting with 0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4 not found: ID does not exist" containerID="0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.356561 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} err="failed to get container status \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": rpc error: code = NotFound desc = could not find container \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": container with ID starting with 0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.356591 4959 scope.go:117] "RemoveContainer" containerID="ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.356898 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": container with ID starting with ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5 not found: ID does not exist" containerID="ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.356983 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} err="failed to get container status \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": rpc error: code = NotFound desc = could not find container \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": container with ID starting with ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.357008 4959 scope.go:117] "RemoveContainer" containerID="247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf" Jan 28 15:28:57 crc 
kubenswrapper[4959]: E0128 15:28:57.357271 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": container with ID starting with 247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf not found: ID does not exist" containerID="247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.357293 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} err="failed to get container status \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": rpc error: code = NotFound desc = could not find container \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": container with ID starting with 247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.357308 4959 scope.go:117] "RemoveContainer" containerID="c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.357515 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": container with ID starting with c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856 not found: ID does not exist" containerID="c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.357534 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} err="failed to get container status \"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": rpc error: code = NotFound desc = could not find container \"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": container with ID starting with c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.357549 4959 scope.go:117] "RemoveContainer" containerID="cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.357761 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": container with ID starting with cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d not found: ID does not exist" containerID="cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.357788 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} err="failed to get container status \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": rpc error: code = NotFound desc = could not find container \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": container with ID starting with cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: 
I0128 15:28:57.357808 4959 scope.go:117] "RemoveContainer" containerID="24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf" Jan 28 15:28:57 crc kubenswrapper[4959]: E0128 15:28:57.358234 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": container with ID starting with 24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf not found: ID does not exist" containerID="24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.358274 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} err="failed to get container status \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": rpc error: code = NotFound desc = could not find container \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": container with ID starting with 24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.358297 4959 scope.go:117] "RemoveContainer" containerID="d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.358627 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} err="failed to get container status \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": rpc error: code = NotFound desc = could not find container \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": container with ID starting with d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.358648 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.358996 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} err="failed to get container status \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": rpc error: code = NotFound desc = could not find container \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": container with ID starting with d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.359020 4959 scope.go:117] "RemoveContainer" containerID="5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.359322 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} err="failed to get container status \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": rpc error: code = NotFound desc = could not find container \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": container with ID starting with 5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: 
I0128 15:28:57.359343 4959 scope.go:117] "RemoveContainer" containerID="ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.359572 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} err="failed to get container status \"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": rpc error: code = NotFound desc = could not find container \"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": container with ID starting with ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.359610 4959 scope.go:117] "RemoveContainer" containerID="0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.361197 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} err="failed to get container status \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": rpc error: code = NotFound desc = could not find container \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": container with ID starting with 0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.361222 4959 scope.go:117] "RemoveContainer" containerID="ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.361476 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} err="failed to get container status \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": rpc error: code = NotFound desc = could not find container \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": container with ID starting with ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.361501 4959 scope.go:117] "RemoveContainer" containerID="247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.361735 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} err="failed to get container status \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": rpc error: code = NotFound desc = could not find container \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": container with ID starting with 247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.361767 4959 scope.go:117] "RemoveContainer" containerID="c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.361996 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} err="failed to get container status 
\"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": rpc error: code = NotFound desc = could not find container \"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": container with ID starting with c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.362016 4959 scope.go:117] "RemoveContainer" containerID="cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.362348 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} err="failed to get container status \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": rpc error: code = NotFound desc = could not find container \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": container with ID starting with cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.362388 4959 scope.go:117] "RemoveContainer" containerID="24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.362667 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} err="failed to get container status \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": rpc error: code = NotFound desc = could not find container \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": container with ID starting with 24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.362692 4959 scope.go:117] "RemoveContainer" containerID="d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.362966 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} err="failed to get container status \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": rpc error: code = NotFound desc = could not find container \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": container with ID starting with d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.362999 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.363243 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} err="failed to get container status \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": rpc error: code = NotFound desc = could not find container \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": container with ID starting with d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.363265 4959 scope.go:117] "RemoveContainer" 
containerID="5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.363534 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} err="failed to get container status \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": rpc error: code = NotFound desc = could not find container \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": container with ID starting with 5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.363557 4959 scope.go:117] "RemoveContainer" containerID="ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.364940 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} err="failed to get container status \"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": rpc error: code = NotFound desc = could not find container \"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": container with ID starting with ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.364973 4959 scope.go:117] "RemoveContainer" containerID="0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.365305 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} err="failed to get container status \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": rpc error: code = NotFound desc = could not find container \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": container with ID starting with 0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.365327 4959 scope.go:117] "RemoveContainer" containerID="ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.365626 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} err="failed to get container status \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": rpc error: code = NotFound desc = could not find container \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": container with ID starting with ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.365652 4959 scope.go:117] "RemoveContainer" containerID="247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.365991 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} err="failed to get container status \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": rpc error: code = NotFound desc = could not find 
container \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": container with ID starting with 247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.366053 4959 scope.go:117] "RemoveContainer" containerID="c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.366714 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} err="failed to get container status \"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": rpc error: code = NotFound desc = could not find container \"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": container with ID starting with c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.366738 4959 scope.go:117] "RemoveContainer" containerID="cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.367018 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} err="failed to get container status \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": rpc error: code = NotFound desc = could not find container \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": container with ID starting with cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.367057 4959 scope.go:117] "RemoveContainer" containerID="24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.367461 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} err="failed to get container status \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": rpc error: code = NotFound desc = could not find container \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": container with ID starting with 24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.367491 4959 scope.go:117] "RemoveContainer" containerID="d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.367846 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} err="failed to get container status \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": rpc error: code = NotFound desc = could not find container \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": container with ID starting with d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.367874 4959 scope.go:117] "RemoveContainer" containerID="d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.368323 4959 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7"} err="failed to get container status \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": rpc error: code = NotFound desc = could not find container \"d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7\": container with ID starting with d42d66af941d482627e7c899b4cacf447868c6364cf0daa5ea3544c4522886b7 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.368347 4959 scope.go:117] "RemoveContainer" containerID="5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.368646 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3"} err="failed to get container status \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": rpc error: code = NotFound desc = could not find container \"5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3\": container with ID starting with 5788f78dd164816e074f50ee060a6b8ef0382b1b584fc3fe0b82d17df7f0b8d3 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.368681 4959 scope.go:117] "RemoveContainer" containerID="ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.369021 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71"} err="failed to get container status \"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": rpc error: code = NotFound desc = could not find container \"ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71\": container with ID starting with ec0e9887a522833f26b8c2e48ed5a605d001db8e4c2b29d8cd4dc07db99a0b71 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.369043 4959 scope.go:117] "RemoveContainer" containerID="0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.369302 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4"} err="failed to get container status \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": rpc error: code = NotFound desc = could not find container \"0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4\": container with ID starting with 0becf7155d8abeec137442b4272cf70312cb16f3324284484ab2c5a0f1f9d1c4 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.369347 4959 scope.go:117] "RemoveContainer" containerID="ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.369692 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5"} err="failed to get container status \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": rpc error: code = NotFound desc = could not find container \"ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5\": container with ID starting with 
ecfe8fed8238376e5954f3ee51719ff032af0c21cadf81019ae41f13c9f0fef5 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.369720 4959 scope.go:117] "RemoveContainer" containerID="247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.370127 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf"} err="failed to get container status \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": rpc error: code = NotFound desc = could not find container \"247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf\": container with ID starting with 247afbe152dbbb254065a4df0a65901cd83b92a1a86003c6f647608881b05bdf not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.370166 4959 scope.go:117] "RemoveContainer" containerID="c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.370496 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856"} err="failed to get container status \"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": rpc error: code = NotFound desc = could not find container \"c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856\": container with ID starting with c71ed381046bcaaf5c1a3594a016321a9022f8544fd15009d8df17979cb99856 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.370513 4959 scope.go:117] "RemoveContainer" containerID="cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.370771 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d"} err="failed to get container status \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": rpc error: code = NotFound desc = could not find container \"cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d\": container with ID starting with cfebb7cb1160cab3c7ae6d8b9691d58bb6041b01b00f5e8adc6c546e452eb91d not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.370789 4959 scope.go:117] "RemoveContainer" containerID="24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.371592 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf"} err="failed to get container status \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": rpc error: code = NotFound desc = could not find container \"24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf\": container with ID starting with 24456da730eefb01333d6c8eb8273c85e9137a57e27a441d6a89bae0f94cb0cf not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.371612 4959 scope.go:117] "RemoveContainer" containerID="d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.371895 4959 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129"} err="failed to get container status \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": rpc error: code = NotFound desc = could not find container \"d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129\": container with ID starting with d36afcc7ea0a9e0f6d1acd30aa9bd4aa5e42dec91d3a28b500aa93804eb1b129 not found: ID does not exist" Jan 28 15:28:57 crc kubenswrapper[4959]: I0128 15:28:57.405720 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:28:57 crc kubenswrapper[4959]: W0128 15:28:57.432888 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfeab8fcd_7eb5_410a_960c_5485ad3d6e96.slice/crio-639063fac972ce0fcde0f8344231301f9de2197e1f624f5cf665a5119bf2add4 WatchSource:0}: Error finding container 639063fac972ce0fcde0f8344231301f9de2197e1f624f5cf665a5119bf2add4: Status 404 returned error can't find the container with id 639063fac972ce0fcde0f8344231301f9de2197e1f624f5cf665a5119bf2add4 Jan 28 15:28:58 crc kubenswrapper[4959]: I0128 15:28:58.152961 4959 generic.go:334] "Generic (PLEG): container finished" podID="feab8fcd-7eb5-410a-960c-5485ad3d6e96" containerID="5ce3335fc775f8118dd1ad88cffa51d8cee01f516fdc862efe8d60d7755d7988" exitCode=0 Jan 28 15:28:58 crc kubenswrapper[4959]: I0128 15:28:58.153035 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerDied","Data":"5ce3335fc775f8118dd1ad88cffa51d8cee01f516fdc862efe8d60d7755d7988"} Jan 28 15:28:58 crc kubenswrapper[4959]: I0128 15:28:58.153069 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"639063fac972ce0fcde0f8344231301f9de2197e1f624f5cf665a5119bf2add4"} Jan 28 15:28:58 crc kubenswrapper[4959]: I0128 15:28:58.599265 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bad991a-9aad-4e7b-abdd-7d23124f60a8" path="/var/lib/kubelet/pods/1bad991a-9aad-4e7b-abdd-7d23124f60a8/volumes" Jan 28 15:28:59 crc kubenswrapper[4959]: I0128 15:28:59.166635 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"7993e8657445cb4b9d89ad5391f89bb082f7ccb48703c3435bfbad6ed6c684aa"} Jan 28 15:28:59 crc kubenswrapper[4959]: I0128 15:28:59.167237 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"1923d333e59c0b7d961fe816dd7da1c879c81a09ef354dcfc644584fb9891160"} Jan 28 15:28:59 crc kubenswrapper[4959]: I0128 15:28:59.167254 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"3a5e4d5ac81c27001f1b44e15c085ed64b70aea1182681f27e77e9971502ee6d"} Jan 28 15:28:59 crc kubenswrapper[4959]: I0128 15:28:59.167267 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" 
event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"13391fbb21ad1dceb045cf7f088fcfcfd64af20ff9542804cb4b75e9d288d552"} Jan 28 15:29:00 crc kubenswrapper[4959]: I0128 15:29:00.177269 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"40999aba61b9553aa135d9ae17119620b96c3225b8aeccd87b610c4d2fbabe38"} Jan 28 15:29:00 crc kubenswrapper[4959]: I0128 15:29:00.177329 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"d2ec9b4108105f7967caf57cbbbc8ec4236cceed610bccb1a6fd085558975b93"} Jan 28 15:29:01 crc kubenswrapper[4959]: I0128 15:29:01.231892 4959 scope.go:117] "RemoveContainer" containerID="62c14403ee41e19a9b5c5c55bf4716f6db7968f9cb8ea32e41a40039fbdf5d8d" Jan 28 15:29:02 crc kubenswrapper[4959]: I0128 15:29:02.191343 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/2.log" Jan 28 15:29:04 crc kubenswrapper[4959]: I0128 15:29:04.211352 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"17abcf0c5aa41e7099e457180b350d75842088824e2d361fea539acb7d4580b2"} Jan 28 15:29:06 crc kubenswrapper[4959]: I0128 15:29:06.226095 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" event={"ID":"feab8fcd-7eb5-410a-960c-5485ad3d6e96","Type":"ContainerStarted","Data":"5ca34383e7c4d93c30ed48ccb94673aa7a0fc41ca8eb491cb50e4cdfc74751d4"} Jan 28 15:29:06 crc kubenswrapper[4959]: I0128 15:29:06.226587 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:29:06 crc kubenswrapper[4959]: I0128 15:29:06.226612 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:29:06 crc kubenswrapper[4959]: I0128 15:29:06.226624 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:29:06 crc kubenswrapper[4959]: I0128 15:29:06.255070 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:29:06 crc kubenswrapper[4959]: I0128 15:29:06.256435 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:29:06 crc kubenswrapper[4959]: I0128 15:29:06.263051 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" podStartSLOduration=9.263025447 podStartE2EDuration="9.263025447s" podCreationTimestamp="2026-01-28 15:28:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:29:06.259318839 +0000 UTC m=+729.705225232" watchObservedRunningTime="2026-01-28 15:29:06.263025447 +0000 UTC m=+729.708931830" Jan 28 15:29:11 crc kubenswrapper[4959]: I0128 15:29:11.587399 4959 scope.go:117] "RemoveContainer" containerID="2ce5ba0a6fc65f5c4deba9c42f6a6ee7c031bd8945252d3ceb3501661be8235b" Jan 28 15:29:11 crc kubenswrapper[4959]: E0128 
15:29:11.588272 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-bbjnj_openshift-multus(1c1dca0a-c782-43f9-9390-7dc9c5311b97)\"" pod="openshift-multus/multus-bbjnj" podUID="1c1dca0a-c782-43f9-9390-7dc9c5311b97" Jan 28 15:29:22 crc kubenswrapper[4959]: I0128 15:29:22.587149 4959 scope.go:117] "RemoveContainer" containerID="2ce5ba0a6fc65f5c4deba9c42f6a6ee7c031bd8945252d3ceb3501661be8235b" Jan 28 15:29:23 crc kubenswrapper[4959]: I0128 15:29:23.380570 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bbjnj_1c1dca0a-c782-43f9-9390-7dc9c5311b97/kube-multus/2.log" Jan 28 15:29:23 crc kubenswrapper[4959]: I0128 15:29:23.381206 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bbjnj" event={"ID":"1c1dca0a-c782-43f9-9390-7dc9c5311b97","Type":"ContainerStarted","Data":"07cd6bd20dd6df971966ad5492a3e3ce45ae5c115a399ec62a994e8201f31446"} Jan 28 15:29:27 crc kubenswrapper[4959]: I0128 15:29:27.435049 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bh658" Jan 28 15:29:28 crc kubenswrapper[4959]: I0128 15:29:28.699386 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:29:28 crc kubenswrapper[4959]: I0128 15:29:28.699841 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.404228 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw"] Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.405851 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.408859 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.420711 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw"] Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.600287 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.600388 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmb9s\" (UniqueName: \"kubernetes.io/projected/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-kube-api-access-xmb9s\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.600887 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.701618 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmb9s\" (UniqueName: \"kubernetes.io/projected/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-kube-api-access-xmb9s\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.701723 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.701768 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.702330 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.702805 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:34 crc kubenswrapper[4959]: I0128 15:29:34.729457 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmb9s\" (UniqueName: \"kubernetes.io/projected/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-kube-api-access-xmb9s\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:35 crc kubenswrapper[4959]: I0128 15:29:35.022449 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" Jan 28 15:29:35 crc kubenswrapper[4959]: I0128 15:29:35.291565 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw"] Jan 28 15:29:35 crc kubenswrapper[4959]: I0128 15:29:35.466799 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" event={"ID":"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a","Type":"ContainerStarted","Data":"08d330050e7d681f7dffcb44af3045f0c0ceb3a70e075b9d5b80f0a00d175c19"} Jan 28 15:29:35 crc kubenswrapper[4959]: I0128 15:29:35.466872 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" event={"ID":"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a","Type":"ContainerStarted","Data":"744f3b4fa5233b9783cccb6c2982df1a7c396db3d5be7d1b72f679b028c3316b"} Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.476665 4959 generic.go:334] "Generic (PLEG): container finished" podID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerID="08d330050e7d681f7dffcb44af3045f0c0ceb3a70e075b9d5b80f0a00d175c19" exitCode=0 Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.476776 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" event={"ID":"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a","Type":"ContainerDied","Data":"08d330050e7d681f7dffcb44af3045f0c0ceb3a70e075b9d5b80f0a00d175c19"} Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.603316 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wksbv"] Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.606367 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.621211    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wksbv"]
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.731780    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbqk6\" (UniqueName: \"kubernetes.io/projected/033584b8-d69e-4457-9d7f-37c6fa205761-kube-api-access-kbqk6\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.731875    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-catalog-content\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.731907    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-utilities\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.832737    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbqk6\" (UniqueName: \"kubernetes.io/projected/033584b8-d69e-4457-9d7f-37c6fa205761-kube-api-access-kbqk6\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.832826    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-catalog-content\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.832855    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-utilities\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.833593    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-catalog-content\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.833628    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-utilities\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:36 crc kubenswrapper[4959]: I0128 15:29:36.860815    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbqk6\" (UniqueName: \"kubernetes.io/projected/033584b8-d69e-4457-9d7f-37c6fa205761-kube-api-access-kbqk6\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv"
\"kube-api-access-kbqk6\" (UniqueName: \"kubernetes.io/projected/033584b8-d69e-4457-9d7f-37c6fa205761-kube-api-access-kbqk6\") pod \"redhat-operators-wksbv\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " pod="openshift-marketplace/redhat-operators-wksbv" Jan 28 15:29:37 crc kubenswrapper[4959]: I0128 15:29:37.032621 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wksbv" Jan 28 15:29:37 crc kubenswrapper[4959]: I0128 15:29:37.213902 4959 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 28 15:29:37 crc kubenswrapper[4959]: I0128 15:29:37.727706 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wksbv"] Jan 28 15:29:38 crc kubenswrapper[4959]: I0128 15:29:38.490903 4959 generic.go:334] "Generic (PLEG): container finished" podID="033584b8-d69e-4457-9d7f-37c6fa205761" containerID="a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8" exitCode=0 Jan 28 15:29:38 crc kubenswrapper[4959]: I0128 15:29:38.490987 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wksbv" event={"ID":"033584b8-d69e-4457-9d7f-37c6fa205761","Type":"ContainerDied","Data":"a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8"} Jan 28 15:29:38 crc kubenswrapper[4959]: I0128 15:29:38.491399 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wksbv" event={"ID":"033584b8-d69e-4457-9d7f-37c6fa205761","Type":"ContainerStarted","Data":"426a1bc9b3cc8c2c6ac3a9c823a628b250fc49809b1a66eb23c6ba27b4b3fcc9"} Jan 28 15:29:39 crc kubenswrapper[4959]: I0128 15:29:39.504701 4959 generic.go:334] "Generic (PLEG): container finished" podID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerID="4d3ac1927320693ec8373163692901523a34c713525723d545e142c7ae58b938" exitCode=0 Jan 28 15:29:39 crc kubenswrapper[4959]: I0128 15:29:39.504839 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" event={"ID":"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a","Type":"ContainerDied","Data":"4d3ac1927320693ec8373163692901523a34c713525723d545e142c7ae58b938"} Jan 28 15:29:40 crc kubenswrapper[4959]: I0128 15:29:40.513287 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wksbv" event={"ID":"033584b8-d69e-4457-9d7f-37c6fa205761","Type":"ContainerStarted","Data":"c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6"} Jan 28 15:29:40 crc kubenswrapper[4959]: I0128 15:29:40.519344 4959 generic.go:334] "Generic (PLEG): container finished" podID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerID="452501360c5f01cd91453de1fb889dbc79ed98054ba7f11424238459aefd020a" exitCode=0 Jan 28 15:29:40 crc kubenswrapper[4959]: I0128 15:29:40.519378 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" event={"ID":"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a","Type":"ContainerDied","Data":"452501360c5f01cd91453de1fb889dbc79ed98054ba7f11424238459aefd020a"} Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.334581 4959 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.522519    4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmb9s\" (UniqueName: \"kubernetes.io/projected/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-kube-api-access-xmb9s\") pod \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") "
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.522693    4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-util\") pod \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") "
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.522821    4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-bundle\") pod \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\" (UID: \"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a\") "
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.524083    4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-bundle" (OuterVolumeSpecName: "bundle") pod "7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" (UID: "7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.532613    4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-kube-api-access-xmb9s" (OuterVolumeSpecName: "kube-api-access-xmb9s") pod "7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" (UID: "7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a"). InnerVolumeSpecName "kube-api-access-xmb9s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.533816    4959 generic.go:334] "Generic (PLEG): container finished" podID="033584b8-d69e-4457-9d7f-37c6fa205761" containerID="c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6" exitCode=0
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.533911    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wksbv" event={"ID":"033584b8-d69e-4457-9d7f-37c6fa205761","Type":"ContainerDied","Data":"c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6"}
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.539708    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw" event={"ID":"7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a","Type":"ContainerDied","Data":"744f3b4fa5233b9783cccb6c2982df1a7c396db3d5be7d1b72f679b028c3316b"}
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.539779    4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="744f3b4fa5233b9783cccb6c2982df1a7c396db3d5be7d1b72f679b028c3316b"
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.539880    4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw"
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.554308    4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-util" (OuterVolumeSpecName: "util") pod "7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" (UID: "7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.625010    4959 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.625064    4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmb9s\" (UniqueName: \"kubernetes.io/projected/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-kube-api-access-xmb9s\") on node \"crc\" DevicePath \"\""
Jan 28 15:29:42 crc kubenswrapper[4959]: I0128 15:29:42.625140    4959 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a-util\") on node \"crc\" DevicePath \"\""
Jan 28 15:29:43 crc kubenswrapper[4959]: I0128 15:29:43.547317    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wksbv" event={"ID":"033584b8-d69e-4457-9d7f-37c6fa205761","Type":"ContainerStarted","Data":"5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b"}
Jan 28 15:29:43 crc kubenswrapper[4959]: I0128 15:29:43.572266    4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wksbv" podStartSLOduration=3.236573337 podStartE2EDuration="7.572237759s" podCreationTimestamp="2026-01-28 15:29:36 +0000 UTC" firstStartedPulling="2026-01-28 15:29:38.621222052 +0000 UTC m=+762.067128435" lastFinishedPulling="2026-01-28 15:29:42.956886464 +0000 UTC m=+766.402792857" observedRunningTime="2026-01-28 15:29:43.565042716 +0000 UTC m=+767.010949129" watchObservedRunningTime="2026-01-28 15:29:43.572237759 +0000 UTC m=+767.018144152"
Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.510327    4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-xvpdr"]
Jan 28 15:29:44 crc kubenswrapper[4959]: E0128 15:29:44.511008    4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerName="pull"
Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.511025    4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerName="pull"
Jan 28 15:29:44 crc kubenswrapper[4959]: E0128 15:29:44.511037    4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerName="extract"
Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.511043    4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerName="extract"
Jan 28 15:29:44 crc kubenswrapper[4959]: E0128 15:29:44.511057    4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerName="util"
Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.511066    4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerName="util"
containerName="util" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.511215 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a" containerName="extract" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.511705 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-xvpdr" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.514416 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-lwcds" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.514864 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.518026 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.527057 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-xvpdr"] Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.552141 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plb88\" (UniqueName: \"kubernetes.io/projected/a0e99b55-ca45-4cbf-9141-7ee49d98b970-kube-api-access-plb88\") pod \"nmstate-operator-646758c888-xvpdr\" (UID: \"a0e99b55-ca45-4cbf-9141-7ee49d98b970\") " pod="openshift-nmstate/nmstate-operator-646758c888-xvpdr" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.653975 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plb88\" (UniqueName: \"kubernetes.io/projected/a0e99b55-ca45-4cbf-9141-7ee49d98b970-kube-api-access-plb88\") pod \"nmstate-operator-646758c888-xvpdr\" (UID: \"a0e99b55-ca45-4cbf-9141-7ee49d98b970\") " pod="openshift-nmstate/nmstate-operator-646758c888-xvpdr" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.685544 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plb88\" (UniqueName: \"kubernetes.io/projected/a0e99b55-ca45-4cbf-9141-7ee49d98b970-kube-api-access-plb88\") pod \"nmstate-operator-646758c888-xvpdr\" (UID: \"a0e99b55-ca45-4cbf-9141-7ee49d98b970\") " pod="openshift-nmstate/nmstate-operator-646758c888-xvpdr" Jan 28 15:29:44 crc kubenswrapper[4959]: I0128 15:29:44.828997 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:29:45 crc kubenswrapper[4959]: I0128 15:29:45.379095    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-xvpdr"]
Jan 28 15:29:45 crc kubenswrapper[4959]: I0128 15:29:45.561647    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-xvpdr" event={"ID":"a0e99b55-ca45-4cbf-9141-7ee49d98b970","Type":"ContainerStarted","Data":"0ec2052a2843f4a95e6b5c4225fcceffdb07eff73147d9c2f693ba6f33d9e867"}
Jan 28 15:29:47 crc kubenswrapper[4959]: I0128 15:29:47.157794    4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:47 crc kubenswrapper[4959]: I0128 15:29:47.157844    4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:48 crc kubenswrapper[4959]: I0128 15:29:48.184534    4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wksbv" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="registry-server" probeResult="failure" output=<
Jan 28 15:29:48 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s
Jan 28 15:29:48 crc kubenswrapper[4959]: >
Jan 28 15:29:51 crc kubenswrapper[4959]: I0128 15:29:51.188618    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-xvpdr" event={"ID":"a0e99b55-ca45-4cbf-9141-7ee49d98b970","Type":"ContainerStarted","Data":"84db9b4901ab9702a291ace3c9be5e6eb37da5ba0c92bc6d59ed7a0c1a55041e"}
Jan 28 15:29:51 crc kubenswrapper[4959]: I0128 15:29:51.211693    4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-xvpdr" podStartSLOduration=1.7364453819999999 podStartE2EDuration="7.211661781s" podCreationTimestamp="2026-01-28 15:29:44 +0000 UTC" firstStartedPulling="2026-01-28 15:29:45.388610528 +0000 UTC m=+768.834516911" lastFinishedPulling="2026-01-28 15:29:50.863826927 +0000 UTC m=+774.309733310" observedRunningTime="2026-01-28 15:29:51.20830099 +0000 UTC m=+774.654207413" watchObservedRunningTime="2026-01-28 15:29:51.211661781 +0000 UTC m=+774.657568174"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.550830    4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-ffgf2"]
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.552166    4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.554652    4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-htlx6"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.562638    4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh"]
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.564051    4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.571946    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh"]
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.576380    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-ffgf2"]
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.578964    4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.595681    4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-d7gzv"]
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.596490    4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.628398    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-dbus-socket\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.628436    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-ovs-socket\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.628471    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-nmstate-lock\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.628507    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dslrh\" (UniqueName: \"kubernetes.io/projected/f9551657-7ead-43f2-ab0d-0a00ca38d632-kube-api-access-dslrh\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.628558    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf49t\" (UniqueName: \"kubernetes.io/projected/4af3461f-6819-4c66-a2e5-3bdcb0d20557-kube-api-access-kf49t\") pod \"nmstate-webhook-8474b5b9d8-jg2rh\" (UID: \"4af3461f-6819-4c66-a2e5-3bdcb0d20557\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.628635    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/4af3461f-6819-4c66-a2e5-3bdcb0d20557-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-jg2rh\" (UID: \"4af3461f-6819-4c66-a2e5-3bdcb0d20557\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.628657    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs4x2\" (UniqueName: \"kubernetes.io/projected/7c5e3f8a-ef22-47d0-99e2-3dc85615a832-kube-api-access-vs4x2\") pod \"nmstate-metrics-54757c584b-ffgf2\" (UID: \"7c5e3f8a-ef22-47d0-99e2-3dc85615a832\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs4x2\" (UniqueName: \"kubernetes.io/projected/7c5e3f8a-ef22-47d0-99e2-3dc85615a832-kube-api-access-vs4x2\") pod \"nmstate-metrics-54757c584b-ffgf2\" (UID: \"7c5e3f8a-ef22-47d0-99e2-3dc85615a832\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.713330 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"] Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.714085 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.716194 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.717624 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-bf56f" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.717743 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.728678 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"] Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.729561 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dslrh\" (UniqueName: \"kubernetes.io/projected/f9551657-7ead-43f2-ab0d-0a00ca38d632-kube-api-access-dslrh\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.729632 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf49t\" (UniqueName: \"kubernetes.io/projected/4af3461f-6819-4c66-a2e5-3bdcb0d20557-kube-api-access-kf49t\") pod \"nmstate-webhook-8474b5b9d8-jg2rh\" (UID: \"4af3461f-6819-4c66-a2e5-3bdcb0d20557\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.729719 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/4af3461f-6819-4c66-a2e5-3bdcb0d20557-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-jg2rh\" (UID: \"4af3461f-6819-4c66-a2e5-3bdcb0d20557\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.729949 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs4x2\" (UniqueName: \"kubernetes.io/projected/7c5e3f8a-ef22-47d0-99e2-3dc85615a832-kube-api-access-vs4x2\") pod \"nmstate-metrics-54757c584b-ffgf2\" (UID: \"7c5e3f8a-ef22-47d0-99e2-3dc85615a832\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2" Jan 28 15:29:54 crc kubenswrapper[4959]: E0128 15:29:54.729889 4959 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 28 15:29:54 crc kubenswrapper[4959]: E0128 15:29:54.730076 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4af3461f-6819-4c66-a2e5-3bdcb0d20557-tls-key-pair podName:4af3461f-6819-4c66-a2e5-3bdcb0d20557 nodeName:}" failed. 
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.732619    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-dbus-socket\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.732686    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-ovs-socket\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.732749    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-nmstate-lock\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.732896    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-nmstate-lock\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.732962    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-ovs-socket\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.733080    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f9551657-7ead-43f2-ab0d-0a00ca38d632-dbus-socket\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.758128    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dslrh\" (UniqueName: \"kubernetes.io/projected/f9551657-7ead-43f2-ab0d-0a00ca38d632-kube-api-access-dslrh\") pod \"nmstate-handler-d7gzv\" (UID: \"f9551657-7ead-43f2-ab0d-0a00ca38d632\") " pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.758135    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vs4x2\" (UniqueName: \"kubernetes.io/projected/7c5e3f8a-ef22-47d0-99e2-3dc85615a832-kube-api-access-vs4x2\") pod \"nmstate-metrics-54757c584b-ffgf2\" (UID: \"7c5e3f8a-ef22-47d0-99e2-3dc85615a832\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.773917    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf49t\" (UniqueName: \"kubernetes.io/projected/4af3461f-6819-4c66-a2e5-3bdcb0d20557-kube-api-access-kf49t\") pod \"nmstate-webhook-8474b5b9d8-jg2rh\" (UID: \"4af3461f-6819-4c66-a2e5-3bdcb0d20557\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh"
for volume \"kube-api-access-kf49t\" (UniqueName: \"kubernetes.io/projected/4af3461f-6819-4c66-a2e5-3bdcb0d20557-kube-api-access-kf49t\") pod \"nmstate-webhook-8474b5b9d8-jg2rh\" (UID: \"4af3461f-6819-4c66-a2e5-3bdcb0d20557\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.834669 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9230b3bb-f87d-4da9-83f5-624c4cc250eb-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.834846 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9230b3bb-f87d-4da9-83f5-624c4cc250eb-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.835000 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m2c2\" (UniqueName: \"kubernetes.io/projected/9230b3bb-f87d-4da9-83f5-624c4cc250eb-kube-api-access-5m2c2\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.872275 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.914378 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-d7gzv" Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.916251 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f5c74688b-rxtph"] Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.919437 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.936821    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9230b3bb-f87d-4da9-83f5-624c4cc250eb-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.936884    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9230b3bb-f87d-4da9-83f5-624c4cc250eb-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.936936    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m2c2\" (UniqueName: \"kubernetes.io/projected/9230b3bb-f87d-4da9-83f5-624c4cc250eb-kube-api-access-5m2c2\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.938496    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9230b3bb-f87d-4da9-83f5-624c4cc250eb-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.943548    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9230b3bb-f87d-4da9-83f5-624c4cc250eb-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.950694    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f5c74688b-rxtph"]
Jan 28 15:29:54 crc kubenswrapper[4959]: I0128 15:29:54.964123    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m2c2\" (UniqueName: \"kubernetes.io/projected/9230b3bb-f87d-4da9-83f5-624c4cc250eb-kube-api-access-5m2c2\") pod \"nmstate-console-plugin-7754f76f8b-nqbr4\" (UID: \"9230b3bb-f87d-4da9-83f5-624c4cc250eb\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.038654    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-oauth-serving-cert\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.038750    4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-service-ca\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.038782 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g9dk\" (UniqueName: \"kubernetes.io/projected/57f352d5-135a-4371-a604-87ad6334f929-kube-api-access-8g9dk\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.038806 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-trusted-ca-bundle\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.038842 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/57f352d5-135a-4371-a604-87ad6334f929-console-oauth-config\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.038984 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/57f352d5-135a-4371-a604-87ad6334f929-console-serving-cert\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.039299 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-console-config\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.039904 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.141819    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-console-config\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.142268    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-oauth-serving-cert\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.142320    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-service-ca\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.142363    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g9dk\" (UniqueName: \"kubernetes.io/projected/57f352d5-135a-4371-a604-87ad6334f929-kube-api-access-8g9dk\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.142384    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-trusted-ca-bundle\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.142441    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/57f352d5-135a-4371-a604-87ad6334f929-console-oauth-config\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.142461    4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/57f352d5-135a-4371-a604-87ad6334f929-console-serving-cert\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.146408    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-console-config\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.188515    4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-oauth-serving-cert\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph"
pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.189985 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-service-ca\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.190036 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/57f352d5-135a-4371-a604-87ad6334f929-console-serving-cert\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.190731 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/57f352d5-135a-4371-a604-87ad6334f929-trusted-ca-bundle\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.192243 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/57f352d5-135a-4371-a604-87ad6334f929-console-oauth-config\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.234673 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g9dk\" (UniqueName: \"kubernetes.io/projected/57f352d5-135a-4371-a604-87ad6334f929-kube-api-access-8g9dk\") pod \"console-f5c74688b-rxtph\" (UID: \"57f352d5-135a-4371-a604-87ad6334f929\") " pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.235676 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-d7gzv" event={"ID":"f9551657-7ead-43f2-ab0d-0a00ca38d632","Type":"ContainerStarted","Data":"b40ed91866078db4025c09ab44f1f651ffec1e4b91163442680391110f883a50"} Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.248833 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/4af3461f-6819-4c66-a2e5-3bdcb0d20557-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-jg2rh\" (UID: \"4af3461f-6819-4c66-a2e5-3bdcb0d20557\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.253569 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/4af3461f-6819-4c66-a2e5-3bdcb0d20557-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-jg2rh\" (UID: \"4af3461f-6819-4c66-a2e5-3bdcb0d20557\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.279588 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.483688 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.544980    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-ffgf2"]
Jan 28 15:29:55 crc kubenswrapper[4959]: W0128 15:29:55.556673    4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c5e3f8a_ef22_47d0_99e2_3dc85615a832.slice/crio-e7605d41ec3db5a1a323a2bc13233364b3c34843dd6a07a2810c3ae2b4336832 WatchSource:0}: Error finding container e7605d41ec3db5a1a323a2bc13233364b3c34843dd6a07a2810c3ae2b4336832: Status 404 returned error can't find the container with id e7605d41ec3db5a1a323a2bc13233364b3c34843dd6a07a2810c3ae2b4336832
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.596980    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4"]
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.735725    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f5c74688b-rxtph"]
Jan 28 15:29:55 crc kubenswrapper[4959]: I0128 15:29:55.788431    4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh"]
Jan 28 15:29:55 crc kubenswrapper[4959]: W0128 15:29:55.803451    4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4af3461f_6819_4c66_a2e5_3bdcb0d20557.slice/crio-80a4099cb54243b4184a7621fd8121b1951c82a1df46b26689fb8bd74889fdf4 WatchSource:0}: Error finding container 80a4099cb54243b4184a7621fd8121b1951c82a1df46b26689fb8bd74889fdf4: Status 404 returned error can't find the container with id 80a4099cb54243b4184a7621fd8121b1951c82a1df46b26689fb8bd74889fdf4
Jan 28 15:29:56 crc kubenswrapper[4959]: I0128 15:29:56.245200    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4" event={"ID":"9230b3bb-f87d-4da9-83f5-624c4cc250eb","Type":"ContainerStarted","Data":"f064ceaaee188c4244e0a058c2b1eed38954419694652cdbc893251f8f86f7b5"}
Jan 28 15:29:56 crc kubenswrapper[4959]: I0128 15:29:56.246911    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2" event={"ID":"7c5e3f8a-ef22-47d0-99e2-3dc85615a832","Type":"ContainerStarted","Data":"e7605d41ec3db5a1a323a2bc13233364b3c34843dd6a07a2810c3ae2b4336832"}
Jan 28 15:29:56 crc kubenswrapper[4959]: I0128 15:29:56.248994    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f5c74688b-rxtph" event={"ID":"57f352d5-135a-4371-a604-87ad6334f929","Type":"ContainerStarted","Data":"0649a70679e8428dc9d87c3a8e788467e6a1afd84d2aa2765f2058a0210ddf67"}
Jan 28 15:29:56 crc kubenswrapper[4959]: I0128 15:29:56.249041    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f5c74688b-rxtph" event={"ID":"57f352d5-135a-4371-a604-87ad6334f929","Type":"ContainerStarted","Data":"4db76630b92db38f84827c96fa06fcb4125f8671ec3c7ceeca01dd73f0f6add5"}
Jan 28 15:29:56 crc kubenswrapper[4959]: I0128 15:29:56.250384    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" event={"ID":"4af3461f-6819-4c66-a2e5-3bdcb0d20557","Type":"ContainerStarted","Data":"80a4099cb54243b4184a7621fd8121b1951c82a1df46b26689fb8bd74889fdf4"}
Jan 28 15:29:56 crc kubenswrapper[4959]: I0128 15:29:56.270535    4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f5c74688b-rxtph" podStartSLOduration=2.270505815 podStartE2EDuration="2.270505815s" podCreationTimestamp="2026-01-28 15:29:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:29:56.269904081 +0000 UTC m=+779.715810484" watchObservedRunningTime="2026-01-28 15:29:56.270505815 +0000 UTC m=+779.716412198"
Jan 28 15:29:57 crc kubenswrapper[4959]: I0128 15:29:57.084898    4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:57 crc kubenswrapper[4959]: I0128 15:29:57.151554    4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wksbv"
Jan 28 15:29:58 crc kubenswrapper[4959]: I0128 15:29:58.690079    4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 15:29:58 crc kubenswrapper[4959]: I0128 15:29:58.690573    4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.279503    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-d7gzv" event={"ID":"f9551657-7ead-43f2-ab0d-0a00ca38d632","Type":"ContainerStarted","Data":"99b856b903562e48f1a6fa67a7568357d089648555c5fef4c5bd4a99cf478a6f"}
Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.281077    4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-d7gzv"
Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.281276    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2" event={"ID":"7c5e3f8a-ef22-47d0-99e2-3dc85615a832","Type":"ContainerStarted","Data":"7ca2c6e18d28c9b37acbd97476ef46d9af7db666064515e1df87ba48336db871"}
Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.282820    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" event={"ID":"4af3461f-6819-4c66-a2e5-3bdcb0d20557","Type":"ContainerStarted","Data":"5c4932baac9f9fa7167c5bb296158e0a8f68f87691b7f863e11418ef3ad433f0"}
Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.283779    4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh"
Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.292615    4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4" event={"ID":"9230b3bb-f87d-4da9-83f5-624c4cc250eb","Type":"ContainerStarted","Data":"e69f3ff063d2453a0adb526668b78bac389de7af0bcac2a611e64e80fe39b79e"}
Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.303896    4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-d7gzv" podStartSLOduration=1.299900444 podStartE2EDuration="5.303885809s" podCreationTimestamp="2026-01-28 15:29:54 +0000 UTC" firstStartedPulling="2026-01-28 15:29:54.952264033 +0000 UTC m=+778.398170416" lastFinishedPulling="2026-01-28 15:29:58.956249398 +0000 UTC m=+782.402155781" observedRunningTime="2026-01-28 15:29:59.302722222 +0000 UTC m=+782.748628645" watchObservedRunningTime="2026-01-28 15:29:59.303885809 +0000 UTC m=+782.749792192"
podCreationTimestamp="2026-01-28 15:29:54 +0000 UTC" firstStartedPulling="2026-01-28 15:29:54.952264033 +0000 UTC m=+778.398170416" lastFinishedPulling="2026-01-28 15:29:58.956249398 +0000 UTC m=+782.402155781" observedRunningTime="2026-01-28 15:29:59.302722222 +0000 UTC m=+782.748628645" watchObservedRunningTime="2026-01-28 15:29:59.303885809 +0000 UTC m=+782.749792192" Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.322860 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" podStartSLOduration=2.176086764 podStartE2EDuration="5.322847887s" podCreationTimestamp="2026-01-28 15:29:54 +0000 UTC" firstStartedPulling="2026-01-28 15:29:55.806361333 +0000 UTC m=+779.252267716" lastFinishedPulling="2026-01-28 15:29:58.953122446 +0000 UTC m=+782.399028839" observedRunningTime="2026-01-28 15:29:59.319133862 +0000 UTC m=+782.765040285" watchObservedRunningTime="2026-01-28 15:29:59.322847887 +0000 UTC m=+782.768754270" Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.338445 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-nqbr4" podStartSLOduration=2.013194791 podStartE2EDuration="5.338423947s" podCreationTimestamp="2026-01-28 15:29:54 +0000 UTC" firstStartedPulling="2026-01-28 15:29:55.61923932 +0000 UTC m=+779.065145703" lastFinishedPulling="2026-01-28 15:29:58.944468466 +0000 UTC m=+782.390374859" observedRunningTime="2026-01-28 15:29:59.334197319 +0000 UTC m=+782.780103712" watchObservedRunningTime="2026-01-28 15:29:59.338423947 +0000 UTC m=+782.784330330" Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.390801 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wksbv"] Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.391379 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wksbv" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="registry-server" containerID="cri-o://5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b" gracePeriod=2 Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.782198 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wksbv" Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.923167 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbqk6\" (UniqueName: \"kubernetes.io/projected/033584b8-d69e-4457-9d7f-37c6fa205761-kube-api-access-kbqk6\") pod \"033584b8-d69e-4457-9d7f-37c6fa205761\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.923250 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-catalog-content\") pod \"033584b8-d69e-4457-9d7f-37c6fa205761\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.923282 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-utilities\") pod \"033584b8-d69e-4457-9d7f-37c6fa205761\" (UID: \"033584b8-d69e-4457-9d7f-37c6fa205761\") " Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.924664 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-utilities" (OuterVolumeSpecName: "utilities") pod "033584b8-d69e-4457-9d7f-37c6fa205761" (UID: "033584b8-d69e-4457-9d7f-37c6fa205761"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:29:59 crc kubenswrapper[4959]: I0128 15:29:59.931535 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/033584b8-d69e-4457-9d7f-37c6fa205761-kube-api-access-kbqk6" (OuterVolumeSpecName: "kube-api-access-kbqk6") pod "033584b8-d69e-4457-9d7f-37c6fa205761" (UID: "033584b8-d69e-4457-9d7f-37c6fa205761"). InnerVolumeSpecName "kube-api-access-kbqk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.024996 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.025038 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbqk6\" (UniqueName: \"kubernetes.io/projected/033584b8-d69e-4457-9d7f-37c6fa205761-kube-api-access-kbqk6\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.049746 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "033584b8-d69e-4457-9d7f-37c6fa205761" (UID: "033584b8-d69e-4457-9d7f-37c6fa205761"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.127270 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/033584b8-d69e-4457-9d7f-37c6fa205761-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.145673 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv"] Jan 28 15:30:00 crc kubenswrapper[4959]: E0128 15:30:00.145949 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="extract-content" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.145968 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="extract-content" Jan 28 15:30:00 crc kubenswrapper[4959]: E0128 15:30:00.145981 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="registry-server" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.145989 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="registry-server" Jan 28 15:30:00 crc kubenswrapper[4959]: E0128 15:30:00.145998 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="extract-utilities" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.146005 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="extract-utilities" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.146142 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" containerName="registry-server" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.146636 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.149210 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.149336 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.156433 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv"] Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.228445 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-config-volume\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.228545 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-secret-volume\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.228588 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs9tc\" (UniqueName: \"kubernetes.io/projected/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-kube-api-access-zs9tc\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.303662 4959 generic.go:334] "Generic (PLEG): container finished" podID="033584b8-d69e-4457-9d7f-37c6fa205761" containerID="5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b" exitCode=0 Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.303728 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wksbv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.303772 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wksbv" event={"ID":"033584b8-d69e-4457-9d7f-37c6fa205761","Type":"ContainerDied","Data":"5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b"} Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.303807 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wksbv" event={"ID":"033584b8-d69e-4457-9d7f-37c6fa205761","Type":"ContainerDied","Data":"426a1bc9b3cc8c2c6ac3a9c823a628b250fc49809b1a66eb23c6ba27b4b3fcc9"} Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.303829 4959 scope.go:117] "RemoveContainer" containerID="5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.331180 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-config-volume\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.331271 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-secret-volume\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.331317 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs9tc\" (UniqueName: \"kubernetes.io/projected/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-kube-api-access-zs9tc\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.332725 4959 scope.go:117] "RemoveContainer" containerID="c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.333926 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-config-volume\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.335949 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wksbv"] Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.347949 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-secret-volume\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.352267 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wksbv"] Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 
15:30:00.358596 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs9tc\" (UniqueName: \"kubernetes.io/projected/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-kube-api-access-zs9tc\") pod \"collect-profiles-29493570-zhrsv\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.393545 4959 scope.go:117] "RemoveContainer" containerID="a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.410003 4959 scope.go:117] "RemoveContainer" containerID="5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b" Jan 28 15:30:00 crc kubenswrapper[4959]: E0128 15:30:00.410550 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b\": container with ID starting with 5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b not found: ID does not exist" containerID="5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.410609 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b"} err="failed to get container status \"5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b\": rpc error: code = NotFound desc = could not find container \"5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b\": container with ID starting with 5c7dc510553b0bb04067604f1cc50900fde1c1d6122b59d8b53e96764b190d0b not found: ID does not exist" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.410650 4959 scope.go:117] "RemoveContainer" containerID="c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6" Jan 28 15:30:00 crc kubenswrapper[4959]: E0128 15:30:00.411271 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6\": container with ID starting with c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6 not found: ID does not exist" containerID="c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.411313 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6"} err="failed to get container status \"c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6\": rpc error: code = NotFound desc = could not find container \"c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6\": container with ID starting with c71ba2f4e5ee54b60440da77df44bbf20e424aad4f512e2eb13ead9726b49ff6 not found: ID does not exist" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.411335 4959 scope.go:117] "RemoveContainer" containerID="a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8" Jan 28 15:30:00 crc kubenswrapper[4959]: E0128 15:30:00.411801 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8\": container with ID starting with 
a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8 not found: ID does not exist" containerID="a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.411920 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8"} err="failed to get container status \"a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8\": rpc error: code = NotFound desc = could not find container \"a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8\": container with ID starting with a5eb181b2935776a7089810bfda5039ac992610fdc21c5f86b4450af3391f7d8 not found: ID does not exist" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.486320 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.493194 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.610337 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="033584b8-d69e-4457-9d7f-37c6fa205761" path="/var/lib/kubelet/pods/033584b8-d69e-4457-9d7f-37c6fa205761/volumes" Jan 28 15:30:00 crc kubenswrapper[4959]: I0128 15:30:00.910658 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv"] Jan 28 15:30:00 crc kubenswrapper[4959]: W0128 15:30:00.925269 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c9e4b78_98fb_4ef7_b4db_3bfc859f31ea.slice/crio-05080850b8eaf6f21559f38441185104371e3ecefb9a0d70d1a04849620fe234 WatchSource:0}: Error finding container 05080850b8eaf6f21559f38441185104371e3ecefb9a0d70d1a04849620fe234: Status 404 returned error can't find the container with id 05080850b8eaf6f21559f38441185104371e3ecefb9a0d70d1a04849620fe234 Jan 28 15:30:01 crc kubenswrapper[4959]: I0128 15:30:01.325659 4959 generic.go:334] "Generic (PLEG): container finished" podID="6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea" containerID="5cd879994ea1b3bc20e97394016d67f5a684f1bd3214ba59c226925e30e42cf1" exitCode=0 Jan 28 15:30:01 crc kubenswrapper[4959]: I0128 15:30:01.325760 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" event={"ID":"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea","Type":"ContainerDied","Data":"5cd879994ea1b3bc20e97394016d67f5a684f1bd3214ba59c226925e30e42cf1"} Jan 28 15:30:01 crc kubenswrapper[4959]: I0128 15:30:01.325805 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" event={"ID":"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea","Type":"ContainerStarted","Data":"05080850b8eaf6f21559f38441185104371e3ecefb9a0d70d1a04849620fe234"} Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.334092 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2" event={"ID":"7c5e3f8a-ef22-47d0-99e2-3dc85615a832","Type":"ContainerStarted","Data":"23ddcd7e92d1f98d7acd9ffd099cf1b89c6e94514f3dff6f659a00e61a9039d2"} Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.356706 4959 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-ffgf2" podStartSLOduration=2.32696563 podStartE2EDuration="8.356680121s" podCreationTimestamp="2026-01-28 15:29:54 +0000 UTC" firstStartedPulling="2026-01-28 15:29:55.568248953 +0000 UTC m=+779.014155336" lastFinishedPulling="2026-01-28 15:30:01.597963444 +0000 UTC m=+785.043869827" observedRunningTime="2026-01-28 15:30:02.353968509 +0000 UTC m=+785.799874922" watchObservedRunningTime="2026-01-28 15:30:02.356680121 +0000 UTC m=+785.802586504" Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.580071 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.664781 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-secret-volume\") pod \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.664992 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-config-volume\") pod \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.665030 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zs9tc\" (UniqueName: \"kubernetes.io/projected/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-kube-api-access-zs9tc\") pod \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\" (UID: \"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea\") " Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.668071 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-config-volume" (OuterVolumeSpecName: "config-volume") pod "6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea" (UID: "6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.676852 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-kube-api-access-zs9tc" (OuterVolumeSpecName: "kube-api-access-zs9tc") pod "6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea" (UID: "6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea"). InnerVolumeSpecName "kube-api-access-zs9tc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.680444 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea" (UID: "6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.767401 4959 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.767476 4959 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:02 crc kubenswrapper[4959]: I0128 15:30:02.767547 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zs9tc\" (UniqueName: \"kubernetes.io/projected/6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea-kube-api-access-zs9tc\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:03 crc kubenswrapper[4959]: I0128 15:30:03.353569 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" Jan 28 15:30:03 crc kubenswrapper[4959]: I0128 15:30:03.353562 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493570-zhrsv" event={"ID":"6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea","Type":"ContainerDied","Data":"05080850b8eaf6f21559f38441185104371e3ecefb9a0d70d1a04849620fe234"} Jan 28 15:30:03 crc kubenswrapper[4959]: I0128 15:30:03.353635 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05080850b8eaf6f21559f38441185104371e3ecefb9a0d70d1a04849620fe234" Jan 28 15:30:04 crc kubenswrapper[4959]: I0128 15:30:04.948487 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-d7gzv" Jan 28 15:30:05 crc kubenswrapper[4959]: I0128 15:30:05.281247 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:30:05 crc kubenswrapper[4959]: I0128 15:30:05.281351 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:30:05 crc kubenswrapper[4959]: I0128 15:30:05.291209 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:30:05 crc kubenswrapper[4959]: I0128 15:30:05.382614 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f5c74688b-rxtph" Jan 28 15:30:05 crc kubenswrapper[4959]: I0128 15:30:05.446981 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-b7ncr"] Jan 28 15:30:15 crc kubenswrapper[4959]: I0128 15:30:15.491220 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jg2rh" Jan 28 15:30:27 crc kubenswrapper[4959]: I0128 15:30:27.955855 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z"] Jan 28 15:30:27 crc kubenswrapper[4959]: E0128 15:30:27.959140 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea" containerName="collect-profiles" Jan 28 15:30:27 crc kubenswrapper[4959]: I0128 15:30:27.960545 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea" containerName="collect-profiles" Jan 28 15:30:27 crc 
kubenswrapper[4959]: I0128 15:30:27.961217 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c9e4b78-98fb-4ef7-b4db-3bfc859f31ea" containerName="collect-profiles" Jan 28 15:30:27 crc kubenswrapper[4959]: I0128 15:30:27.962330 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:27 crc kubenswrapper[4959]: I0128 15:30:27.964691 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 15:30:27 crc kubenswrapper[4959]: I0128 15:30:27.970015 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z"] Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.056544 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znqp4\" (UniqueName: \"kubernetes.io/projected/2973aad5-f211-4e21-a51c-c1d0c79d1e99-kube-api-access-znqp4\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.056981 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.057008 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.158724 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znqp4\" (UniqueName: \"kubernetes.io/projected/2973aad5-f211-4e21-a51c-c1d0c79d1e99-kube-api-access-znqp4\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.158904 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.158944 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " 
pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.160153 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.161929 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.182719 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znqp4\" (UniqueName: \"kubernetes.io/projected/2973aad5-f211-4e21-a51c-c1d0c79d1e99-kube-api-access-znqp4\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.282978 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.689065 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.689646 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.689718 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.690657 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5190babcd72aa8d406516ffe324e9eaebbd4bede0a5bd30239f36eb74204fed6"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.690743 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://5190babcd72aa8d406516ffe324e9eaebbd4bede0a5bd30239f36eb74204fed6" gracePeriod=600 Jan 28 15:30:28 crc kubenswrapper[4959]: I0128 15:30:28.742170 4959 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z"] Jan 28 15:30:29 crc kubenswrapper[4959]: I0128 15:30:29.538251 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" event={"ID":"2973aad5-f211-4e21-a51c-c1d0c79d1e99","Type":"ContainerStarted","Data":"fe571e3cfc225080ed11e55d16a59aaba4efe140488fb00f73bc05a19094b72d"} Jan 28 15:30:30 crc kubenswrapper[4959]: I0128 15:30:30.486747 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-b7ncr" podUID="0032b760-b9d9-4533-ae6c-dfe3e55d16e6" containerName="console" containerID="cri-o://1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634" gracePeriod=15 Jan 28 15:30:30 crc kubenswrapper[4959]: I0128 15:30:30.545864 4959 generic.go:334] "Generic (PLEG): container finished" podID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerID="4be965ee3de18b391a55b1d1c42c1152daaa44464d3f4ea80dcb63a8154552f5" exitCode=0 Jan 28 15:30:30 crc kubenswrapper[4959]: I0128 15:30:30.545920 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" event={"ID":"2973aad5-f211-4e21-a51c-c1d0c79d1e99","Type":"ContainerDied","Data":"4be965ee3de18b391a55b1d1c42c1152daaa44464d3f4ea80dcb63a8154552f5"} Jan 28 15:30:30 crc kubenswrapper[4959]: I0128 15:30:30.549644 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="5190babcd72aa8d406516ffe324e9eaebbd4bede0a5bd30239f36eb74204fed6" exitCode=0 Jan 28 15:30:30 crc kubenswrapper[4959]: I0128 15:30:30.549690 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"5190babcd72aa8d406516ffe324e9eaebbd4bede0a5bd30239f36eb74204fed6"} Jan 28 15:30:30 crc kubenswrapper[4959]: I0128 15:30:30.549733 4959 scope.go:117] "RemoveContainer" containerID="e5d004ca357a0dc2c6c28e91b74898a337366c10d600aff9af5ea78d94bfa6c7" Jan 28 15:30:30 crc kubenswrapper[4959]: I0128 15:30:30.860081 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-b7ncr_0032b760-b9d9-4533-ae6c-dfe3e55d16e6/console/0.log" Jan 28 15:30:30 crc kubenswrapper[4959]: I0128 15:30:30.860510 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.008504 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-config\") pod \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.008718 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-service-ca\") pod \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.008822 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-oauth-serving-cert\") pod \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.009014 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkftf\" (UniqueName: \"kubernetes.io/projected/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-kube-api-access-lkftf\") pod \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.009136 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-trusted-ca-bundle\") pod \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.009302 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-oauth-config\") pod \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.009427 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-serving-cert\") pod \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\" (UID: \"0032b760-b9d9-4533-ae6c-dfe3e55d16e6\") " Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.009781 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-service-ca" (OuterVolumeSpecName: "service-ca") pod "0032b760-b9d9-4533-ae6c-dfe3e55d16e6" (UID: "0032b760-b9d9-4533-ae6c-dfe3e55d16e6"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.009821 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-config" (OuterVolumeSpecName: "console-config") pod "0032b760-b9d9-4533-ae6c-dfe3e55d16e6" (UID: "0032b760-b9d9-4533-ae6c-dfe3e55d16e6"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.009978 4959 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.010052 4959 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.010402 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "0032b760-b9d9-4533-ae6c-dfe3e55d16e6" (UID: "0032b760-b9d9-4533-ae6c-dfe3e55d16e6"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.011268 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "0032b760-b9d9-4533-ae6c-dfe3e55d16e6" (UID: "0032b760-b9d9-4533-ae6c-dfe3e55d16e6"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.018414 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-kube-api-access-lkftf" (OuterVolumeSpecName: "kube-api-access-lkftf") pod "0032b760-b9d9-4533-ae6c-dfe3e55d16e6" (UID: "0032b760-b9d9-4533-ae6c-dfe3e55d16e6"). InnerVolumeSpecName "kube-api-access-lkftf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.018472 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "0032b760-b9d9-4533-ae6c-dfe3e55d16e6" (UID: "0032b760-b9d9-4533-ae6c-dfe3e55d16e6"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.019531 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "0032b760-b9d9-4533-ae6c-dfe3e55d16e6" (UID: "0032b760-b9d9-4533-ae6c-dfe3e55d16e6"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.111065 4959 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.111515 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkftf\" (UniqueName: \"kubernetes.io/projected/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-kube-api-access-lkftf\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.111530 4959 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.111546 4959 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.111558 4959 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0032b760-b9d9-4533-ae6c-dfe3e55d16e6-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.557141 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-b7ncr_0032b760-b9d9-4533-ae6c-dfe3e55d16e6/console/0.log" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.557215 4959 generic.go:334] "Generic (PLEG): container finished" podID="0032b760-b9d9-4533-ae6c-dfe3e55d16e6" containerID="1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634" exitCode=2 Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.557282 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-b7ncr" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.557365 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-b7ncr" event={"ID":"0032b760-b9d9-4533-ae6c-dfe3e55d16e6","Type":"ContainerDied","Data":"1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634"} Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.557428 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-b7ncr" event={"ID":"0032b760-b9d9-4533-ae6c-dfe3e55d16e6","Type":"ContainerDied","Data":"3159177cfe3f7be6135a83cc3bd67f789c63cf041e39687871a11fd52d484d8c"} Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.557455 4959 scope.go:117] "RemoveContainer" containerID="1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.560078 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"8e575dc9c25dda36f1b0b8c84111a641d3564e8c98ab1a5fe36fe70b774dfdfc"} Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.601561 4959 scope.go:117] "RemoveContainer" containerID="1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634" Jan 28 15:30:31 crc kubenswrapper[4959]: E0128 15:30:31.602184 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634\": container with ID starting with 1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634 not found: ID does not exist" containerID="1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.602228 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634"} err="failed to get container status \"1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634\": rpc error: code = NotFound desc = could not find container \"1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634\": container with ID starting with 1165e61049a341065ae1c5eb7cfe36f4b2e8816c57b5e715196966e7d746d634 not found: ID does not exist" Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.613597 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-b7ncr"] Jan 28 15:30:31 crc kubenswrapper[4959]: I0128 15:30:31.619420 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-b7ncr"] Jan 28 15:30:32 crc kubenswrapper[4959]: I0128 15:30:32.570483 4959 generic.go:334] "Generic (PLEG): container finished" podID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerID="8e59f979358d081b24c2aceb47667f04edb8a4ef306cf3327c288e7021cfbc6f" exitCode=0 Jan 28 15:30:32 crc kubenswrapper[4959]: I0128 15:30:32.570546 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" event={"ID":"2973aad5-f211-4e21-a51c-c1d0c79d1e99","Type":"ContainerDied","Data":"8e59f979358d081b24c2aceb47667f04edb8a4ef306cf3327c288e7021cfbc6f"} Jan 28 15:30:32 crc kubenswrapper[4959]: I0128 15:30:32.596480 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="0032b760-b9d9-4533-ae6c-dfe3e55d16e6" path="/var/lib/kubelet/pods/0032b760-b9d9-4533-ae6c-dfe3e55d16e6/volumes" Jan 28 15:30:33 crc kubenswrapper[4959]: I0128 15:30:33.580334 4959 generic.go:334] "Generic (PLEG): container finished" podID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerID="2fb67b17453ac99abfee71220373cc343bcdb48ebe6ba2d13465078c110e3a88" exitCode=0 Jan 28 15:30:33 crc kubenswrapper[4959]: I0128 15:30:33.580396 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" event={"ID":"2973aad5-f211-4e21-a51c-c1d0c79d1e99","Type":"ContainerDied","Data":"2fb67b17453ac99abfee71220373cc343bcdb48ebe6ba2d13465078c110e3a88"} Jan 28 15:30:34 crc kubenswrapper[4959]: I0128 15:30:34.872533 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:34 crc kubenswrapper[4959]: I0128 15:30:34.966016 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-bundle\") pod \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " Jan 28 15:30:34 crc kubenswrapper[4959]: I0128 15:30:34.966698 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-util\") pod \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " Jan 28 15:30:34 crc kubenswrapper[4959]: I0128 15:30:34.966809 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znqp4\" (UniqueName: \"kubernetes.io/projected/2973aad5-f211-4e21-a51c-c1d0c79d1e99-kube-api-access-znqp4\") pod \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\" (UID: \"2973aad5-f211-4e21-a51c-c1d0c79d1e99\") " Jan 28 15:30:34 crc kubenswrapper[4959]: I0128 15:30:34.968145 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-bundle" (OuterVolumeSpecName: "bundle") pod "2973aad5-f211-4e21-a51c-c1d0c79d1e99" (UID: "2973aad5-f211-4e21-a51c-c1d0c79d1e99"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:30:34 crc kubenswrapper[4959]: I0128 15:30:34.974441 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2973aad5-f211-4e21-a51c-c1d0c79d1e99-kube-api-access-znqp4" (OuterVolumeSpecName: "kube-api-access-znqp4") pod "2973aad5-f211-4e21-a51c-c1d0c79d1e99" (UID: "2973aad5-f211-4e21-a51c-c1d0c79d1e99"). InnerVolumeSpecName "kube-api-access-znqp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:30:34 crc kubenswrapper[4959]: I0128 15:30:34.980903 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-util" (OuterVolumeSpecName: "util") pod "2973aad5-f211-4e21-a51c-c1d0c79d1e99" (UID: "2973aad5-f211-4e21-a51c-c1d0c79d1e99"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:30:35 crc kubenswrapper[4959]: I0128 15:30:35.068368 4959 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:35 crc kubenswrapper[4959]: I0128 15:30:35.068408 4959 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2973aad5-f211-4e21-a51c-c1d0c79d1e99-util\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:35 crc kubenswrapper[4959]: I0128 15:30:35.068417 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znqp4\" (UniqueName: \"kubernetes.io/projected/2973aad5-f211-4e21-a51c-c1d0c79d1e99-kube-api-access-znqp4\") on node \"crc\" DevicePath \"\"" Jan 28 15:30:35 crc kubenswrapper[4959]: I0128 15:30:35.598044 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" event={"ID":"2973aad5-f211-4e21-a51c-c1d0c79d1e99","Type":"ContainerDied","Data":"fe571e3cfc225080ed11e55d16a59aaba4efe140488fb00f73bc05a19094b72d"} Jan 28 15:30:35 crc kubenswrapper[4959]: I0128 15:30:35.598141 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe571e3cfc225080ed11e55d16a59aaba4efe140488fb00f73bc05a19094b72d" Jan 28 15:30:35 crc kubenswrapper[4959]: I0128 15:30:35.598176 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.971255 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb"] Jan 28 15:30:45 crc kubenswrapper[4959]: E0128 15:30:45.972341 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0032b760-b9d9-4533-ae6c-dfe3e55d16e6" containerName="console" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.972357 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="0032b760-b9d9-4533-ae6c-dfe3e55d16e6" containerName="console" Jan 28 15:30:45 crc kubenswrapper[4959]: E0128 15:30:45.972367 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerName="pull" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.972373 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerName="pull" Jan 28 15:30:45 crc kubenswrapper[4959]: E0128 15:30:45.972389 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerName="extract" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.972404 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerName="extract" Jan 28 15:30:45 crc kubenswrapper[4959]: E0128 15:30:45.972416 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerName="util" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.972421 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerName="util" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.972521 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="0032b760-b9d9-4533-ae6c-dfe3e55d16e6" containerName="console" Jan 28 
15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.972539 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="2973aad5-f211-4e21-a51c-c1d0c79d1e99" containerName="extract" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.973002 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.980798 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.981014 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-pzdcf" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.981023 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.981064 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.985707 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 28 15:30:45 crc kubenswrapper[4959]: I0128 15:30:45.998591 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb"] Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.137249 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/22993a88-bd9f-44f6-838c-da64f90a0cf3-webhook-cert\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.137333 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/22993a88-bd9f-44f6-838c-da64f90a0cf3-apiservice-cert\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.137393 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x6wb\" (UniqueName: \"kubernetes.io/projected/22993a88-bd9f-44f6-838c-da64f90a0cf3-kube-api-access-4x6wb\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.238744 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/22993a88-bd9f-44f6-838c-da64f90a0cf3-webhook-cert\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.238810 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/22993a88-bd9f-44f6-838c-da64f90a0cf3-apiservice-cert\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.238849 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x6wb\" (UniqueName: \"kubernetes.io/projected/22993a88-bd9f-44f6-838c-da64f90a0cf3-kube-api-access-4x6wb\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.249535 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/22993a88-bd9f-44f6-838c-da64f90a0cf3-apiservice-cert\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.253080 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5"] Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.254076 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.257834 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.258182 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-zwbsn" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.261915 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.265957 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/22993a88-bd9f-44f6-838c-da64f90a0cf3-webhook-cert\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.279852 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x6wb\" (UniqueName: \"kubernetes.io/projected/22993a88-bd9f-44f6-838c-da64f90a0cf3-kube-api-access-4x6wb\") pod \"metallb-operator-controller-manager-6bdc8c55c-q6xtb\" (UID: \"22993a88-bd9f-44f6-838c-da64f90a0cf3\") " pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.280553 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5"] Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.295311 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.341675 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c0f19574-cbac-4f58-b1ce-ecd885de0b11-webhook-cert\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.341810 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dl4kd\" (UniqueName: \"kubernetes.io/projected/c0f19574-cbac-4f58-b1ce-ecd885de0b11-kube-api-access-dl4kd\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.342035 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c0f19574-cbac-4f58-b1ce-ecd885de0b11-apiservice-cert\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.443679 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dl4kd\" (UniqueName: \"kubernetes.io/projected/c0f19574-cbac-4f58-b1ce-ecd885de0b11-kube-api-access-dl4kd\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.444236 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c0f19574-cbac-4f58-b1ce-ecd885de0b11-apiservice-cert\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.444394 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c0f19574-cbac-4f58-b1ce-ecd885de0b11-webhook-cert\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.449970 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c0f19574-cbac-4f58-b1ce-ecd885de0b11-webhook-cert\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.451709 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c0f19574-cbac-4f58-b1ce-ecd885de0b11-apiservice-cert\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " 
pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.476865 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dl4kd\" (UniqueName: \"kubernetes.io/projected/c0f19574-cbac-4f58-b1ce-ecd885de0b11-kube-api-access-dl4kd\") pod \"metallb-operator-webhook-server-75d68c5fb8-wdpd5\" (UID: \"c0f19574-cbac-4f58-b1ce-ecd885de0b11\") " pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.674526 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.961068 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb"] Jan 28 15:30:46 crc kubenswrapper[4959]: I0128 15:30:46.998508 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5"] Jan 28 15:30:47 crc kubenswrapper[4959]: I0128 15:30:47.676406 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" event={"ID":"c0f19574-cbac-4f58-b1ce-ecd885de0b11","Type":"ContainerStarted","Data":"e6ddb4dadc697808886d71029ff8b7ad4c3331ac8a65879c391af6f77896c4e7"} Jan 28 15:30:47 crc kubenswrapper[4959]: I0128 15:30:47.678328 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" event={"ID":"22993a88-bd9f-44f6-838c-da64f90a0cf3","Type":"ContainerStarted","Data":"286763bbda5057c1682bb09fbc012f5f69881be89365b5542354978f58cf7599"} Jan 28 15:30:54 crc kubenswrapper[4959]: I0128 15:30:54.758705 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" event={"ID":"c0f19574-cbac-4f58-b1ce-ecd885de0b11","Type":"ContainerStarted","Data":"ee798366318ff9fe5dec7771d9af5e553bb70baa7a4ba26242c29a62fec2f07d"} Jan 28 15:30:54 crc kubenswrapper[4959]: I0128 15:30:54.759872 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:30:54 crc kubenswrapper[4959]: I0128 15:30:54.763436 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" event={"ID":"22993a88-bd9f-44f6-838c-da64f90a0cf3","Type":"ContainerStarted","Data":"adbf4c1af64a9470cd3d1bda7bfc7c9af9bb8b3f8c609174f9d3f6b072a2c8fb"} Jan 28 15:30:54 crc kubenswrapper[4959]: I0128 15:30:54.763637 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:30:54 crc kubenswrapper[4959]: I0128 15:30:54.780454 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" podStartSLOduration=2.146089963 podStartE2EDuration="8.780429383s" podCreationTimestamp="2026-01-28 15:30:46 +0000 UTC" firstStartedPulling="2026-01-28 15:30:47.021353581 +0000 UTC m=+830.467259964" lastFinishedPulling="2026-01-28 15:30:53.655693001 +0000 UTC m=+837.101599384" observedRunningTime="2026-01-28 15:30:54.778892558 +0000 UTC m=+838.224798941" watchObservedRunningTime="2026-01-28 15:30:54.780429383 +0000 UTC m=+838.226335766" Jan 28 15:30:54 crc 
kubenswrapper[4959]: I0128 15:30:54.814601 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" podStartSLOduration=3.173330493 podStartE2EDuration="9.814575741s" podCreationTimestamp="2026-01-28 15:30:45 +0000 UTC" firstStartedPulling="2026-01-28 15:30:46.987608752 +0000 UTC m=+830.433515135" lastFinishedPulling="2026-01-28 15:30:53.628854 +0000 UTC m=+837.074760383" observedRunningTime="2026-01-28 15:30:54.806987897 +0000 UTC m=+838.252894300" watchObservedRunningTime="2026-01-28 15:30:54.814575741 +0000 UTC m=+838.260482124" Jan 28 15:31:06 crc kubenswrapper[4959]: I0128 15:31:06.686330 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-75d68c5fb8-wdpd5" Jan 28 15:31:26 crc kubenswrapper[4959]: I0128 15:31:26.300252 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6bdc8c55c-q6xtb" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.002853 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-bzd7h"] Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.007254 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.009643 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.010230 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-sgxq8" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.010610 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h"] Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.011638 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.012390 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.014761 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.024160 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h"] Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.115272 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-84w8q"] Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.116541 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.118824 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.119532 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.119653 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.119835 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-v2fvv" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.149049 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-2fnlx"] Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.150065 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.152243 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153225 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49fb622a-9b72-4a64-9d53-31b4871d1fe2-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-rdt2h\" (UID: \"49fb622a-9b72-4a64-9d53-31b4871d1fe2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153265 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-reloader\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153291 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-startup\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153312 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-metrics\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153351 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-conf\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153370 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-metrics-certs\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 
15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153396 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-sockets\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153445 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6tvs\" (UniqueName: \"kubernetes.io/projected/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-kube-api-access-l6tvs\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.153471 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6hhz\" (UniqueName: \"kubernetes.io/projected/49fb622a-9b72-4a64-9d53-31b4871d1fe2-kube-api-access-v6hhz\") pod \"frr-k8s-webhook-server-7df86c4f6c-rdt2h\" (UID: \"49fb622a-9b72-4a64-9d53-31b4871d1fe2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.168445 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-2fnlx"] Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255083 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-conf\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255158 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-metrics-certs\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255186 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdmgx\" (UniqueName: \"kubernetes.io/projected/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-kube-api-access-pdmgx\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255223 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-sockets\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255264 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255297 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6tvs\" (UniqueName: \"kubernetes.io/projected/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-kube-api-access-l6tvs\") pod \"frr-k8s-bzd7h\" (UID: 
\"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255318 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1924ef1e-9d1d-407b-a368-885c1f5eb373-metallb-excludel2\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255336 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-cert\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255354 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-metrics-certs\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255374 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6hhz\" (UniqueName: \"kubernetes.io/projected/49fb622a-9b72-4a64-9d53-31b4871d1fe2-kube-api-access-v6hhz\") pod \"frr-k8s-webhook-server-7df86c4f6c-rdt2h\" (UID: \"49fb622a-9b72-4a64-9d53-31b4871d1fe2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255409 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49fb622a-9b72-4a64-9d53-31b4871d1fe2-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-rdt2h\" (UID: \"49fb622a-9b72-4a64-9d53-31b4871d1fe2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255425 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-reloader\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255446 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-startup\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255462 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-metrics\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.255482 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-metrics-certs\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:27 crc 
kubenswrapper[4959]: I0128 15:31:27.255511 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86kpz\" (UniqueName: \"kubernetes.io/projected/1924ef1e-9d1d-407b-a368-885c1f5eb373-kube-api-access-86kpz\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.256006 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-sockets\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.256573 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-conf\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.256721 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-reloader\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.256587 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-frr-startup\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.256914 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-metrics\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.265201 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49fb622a-9b72-4a64-9d53-31b4871d1fe2-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-rdt2h\" (UID: \"49fb622a-9b72-4a64-9d53-31b4871d1fe2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.275714 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6tvs\" (UniqueName: \"kubernetes.io/projected/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-kube-api-access-l6tvs\") pod \"frr-k8s-bzd7h\" (UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.276042 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6hhz\" (UniqueName: \"kubernetes.io/projected/49fb622a-9b72-4a64-9d53-31b4871d1fe2-kube-api-access-v6hhz\") pod \"frr-k8s-webhook-server-7df86c4f6c-rdt2h\" (UID: \"49fb622a-9b72-4a64-9d53-31b4871d1fe2\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.280966 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0e4cb52a-0e2e-4c10-bd94-ef235dadad01-metrics-certs\") pod \"frr-k8s-bzd7h\" 
(UID: \"0e4cb52a-0e2e-4c10-bd94-ef235dadad01\") " pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.328459 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.340731 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.361211 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-metrics-certs\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.361435 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86kpz\" (UniqueName: \"kubernetes.io/projected/1924ef1e-9d1d-407b-a368-885c1f5eb373-kube-api-access-86kpz\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.361464 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdmgx\" (UniqueName: \"kubernetes.io/projected/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-kube-api-access-pdmgx\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.361508 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.361538 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1924ef1e-9d1d-407b-a368-885c1f5eb373-metallb-excludel2\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.361554 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-cert\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.361572 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-metrics-certs\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:27 crc kubenswrapper[4959]: E0128 15:31:27.361372 4959 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Jan 28 15:31:27 crc kubenswrapper[4959]: E0128 15:31:27.361712 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-metrics-certs podName:fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb nodeName:}" failed. 
No retries permitted until 2026-01-28 15:31:27.861687605 +0000 UTC m=+871.307593988 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-metrics-certs") pod "controller-6968d8fdc4-2fnlx" (UID: "fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb") : secret "controller-certs-secret" not found
Jan 28 15:31:27 crc kubenswrapper[4959]: E0128 15:31:27.362387 4959 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 28 15:31:27 crc kubenswrapper[4959]: E0128 15:31:27.362423 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist podName:1924ef1e-9d1d-407b-a368-885c1f5eb373 nodeName:}" failed. No retries permitted until 2026-01-28 15:31:27.862414341 +0000 UTC m=+871.308320714 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist") pod "speaker-84w8q" (UID: "1924ef1e-9d1d-407b-a368-885c1f5eb373") : secret "metallb-memberlist" not found
Jan 28 15:31:27 crc kubenswrapper[4959]: E0128 15:31:27.361647 4959 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found
Jan 28 15:31:27 crc kubenswrapper[4959]: E0128 15:31:27.362877 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-metrics-certs podName:1924ef1e-9d1d-407b-a368-885c1f5eb373 nodeName:}" failed. No retries permitted until 2026-01-28 15:31:27.862835882 +0000 UTC m=+871.308742265 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-metrics-certs") pod "speaker-84w8q" (UID: "1924ef1e-9d1d-407b-a368-885c1f5eb373") : secret "speaker-certs-secret" not found
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.364573 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1924ef1e-9d1d-407b-a368-885c1f5eb373-metallb-excludel2\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.364771 4959 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.379153 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-cert\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.393703 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86kpz\" (UniqueName: \"kubernetes.io/projected/1924ef1e-9d1d-407b-a368-885c1f5eb373-kube-api-access-86kpz\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.394824 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdmgx\" (UniqueName: \"kubernetes.io/projected/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-kube-api-access-pdmgx\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.923839 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.924374 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-metrics-certs\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.924660 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-metrics-certs\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx"
Jan 28 15:31:27 crc kubenswrapper[4959]: E0128 15:31:27.924663 4959 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 28 15:31:27 crc kubenswrapper[4959]: E0128 15:31:27.924780 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist podName:1924ef1e-9d1d-407b-a368-885c1f5eb373 nodeName:}" failed. No retries permitted until 2026-01-28 15:31:28.924755347 +0000 UTC m=+872.370661730 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist") pod "speaker-84w8q" (UID: "1924ef1e-9d1d-407b-a368-885c1f5eb373") : secret "metallb-memberlist" not found
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.931713 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-metrics-certs\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.935848 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb-metrics-certs\") pod \"controller-6968d8fdc4-2fnlx\" (UID: \"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb\") " pod="metallb-system/controller-6968d8fdc4-2fnlx"
Jan 28 15:31:27 crc kubenswrapper[4959]: I0128 15:31:27.988891 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h"]
Jan 28 15:31:28 crc kubenswrapper[4959]: W0128 15:31:28.010173 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49fb622a_9b72_4a64_9d53_31b4871d1fe2.slice/crio-633bebed7a3d73b4a2a19b0466ce321b3620724975562b1018b2ce1e6505c84b WatchSource:0}: Error finding container 633bebed7a3d73b4a2a19b0466ce321b3620724975562b1018b2ce1e6505c84b: Status 404 returned error can't find the container with id 633bebed7a3d73b4a2a19b0466ce321b3620724975562b1018b2ce1e6505c84b
Jan 28 15:31:28 crc kubenswrapper[4959]: I0128 15:31:28.070409 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-2fnlx"
Jan 28 15:31:28 crc kubenswrapper[4959]: I0128 15:31:28.302178 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-2fnlx"]
Jan 28 15:31:28 crc kubenswrapper[4959]: W0128 15:31:28.309528 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb36ee39_935f_4b4a_9f68_d0d10dc6eaeb.slice/crio-ec4bba3438b163a21e3a4d7fd8cbe810b2190d51a2eaa996d0a4dee63263e121 WatchSource:0}: Error finding container ec4bba3438b163a21e3a4d7fd8cbe810b2190d51a2eaa996d0a4dee63263e121: Status 404 returned error can't find the container with id ec4bba3438b163a21e3a4d7fd8cbe810b2190d51a2eaa996d0a4dee63263e121
Jan 28 15:31:28 crc kubenswrapper[4959]: I0128 15:31:28.943781 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q"
Jan 28 15:31:28 crc kubenswrapper[4959]: E0128 15:31:28.944024 4959 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 28 15:31:28 crc kubenswrapper[4959]: E0128 15:31:28.944496 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist podName:1924ef1e-9d1d-407b-a368-885c1f5eb373 nodeName:}" failed. No retries permitted until 2026-01-28 15:31:30.944474185 +0000 UTC m=+874.390380568 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist") pod "speaker-84w8q" (UID: "1924ef1e-9d1d-407b-a368-885c1f5eb373") : secret "metallb-memberlist" not found
Jan 28 15:31:28 crc kubenswrapper[4959]: I0128 15:31:28.984041 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-2fnlx" event={"ID":"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb","Type":"ContainerStarted","Data":"03dfef07641cb9e1c4a01c275ddf39e8f614bd652a5a4aaa5bb8572116982e6a"}
Jan 28 15:31:28 crc kubenswrapper[4959]: I0128 15:31:28.984126 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-2fnlx" event={"ID":"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb","Type":"ContainerStarted","Data":"ec4bba3438b163a21e3a4d7fd8cbe810b2190d51a2eaa996d0a4dee63263e121"}
Jan 28 15:31:28 crc kubenswrapper[4959]: I0128 15:31:28.985656 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerStarted","Data":"87a2c8dde00415911b0eeb20e9571025f7367efc57af4e608e21c68bafbee66c"}
Jan 28 15:31:28 crc kubenswrapper[4959]: I0128 15:31:28.987211 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" event={"ID":"49fb622a-9b72-4a64-9d53-31b4871d1fe2","Type":"ContainerStarted","Data":"633bebed7a3d73b4a2a19b0466ce321b3620724975562b1018b2ce1e6505c84b"}
Jan 28 15:31:29 crc kubenswrapper[4959]: I0128 15:31:29.996658 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-2fnlx" event={"ID":"fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb","Type":"ContainerStarted","Data":"3d35f2bcda64e50c70ae739d80dffccdb1d2717d7ad6b2fe84971e390bce110a"}
Jan 28 15:31:29 crc kubenswrapper[4959]: I0128 15:31:29.996879 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:30 crc kubenswrapper[4959]: I0128 15:31:30.022098 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-2fnlx" podStartSLOduration=3.022078703 podStartE2EDuration="3.022078703s" podCreationTimestamp="2026-01-28 15:31:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:31:30.021854968 +0000 UTC m=+873.467761371" watchObservedRunningTime="2026-01-28 15:31:30.022078703 +0000 UTC m=+873.467985086" Jan 28 15:31:31 crc kubenswrapper[4959]: I0128 15:31:31.035400 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:31 crc kubenswrapper[4959]: I0128 15:31:31.055280 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1924ef1e-9d1d-407b-a368-885c1f5eb373-memberlist\") pod \"speaker-84w8q\" (UID: \"1924ef1e-9d1d-407b-a368-885c1f5eb373\") " pod="metallb-system/speaker-84w8q" Jan 28 15:31:31 crc kubenswrapper[4959]: I0128 15:31:31.332205 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-84w8q" Jan 28 15:31:31 crc kubenswrapper[4959]: W0128 15:31:31.366909 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1924ef1e_9d1d_407b_a368_885c1f5eb373.slice/crio-474ebf0335eb2a4dcc088a39f8b2e211c7cc4a9afe8e7d7640aa94f96c9143f9 WatchSource:0}: Error finding container 474ebf0335eb2a4dcc088a39f8b2e211c7cc4a9afe8e7d7640aa94f96c9143f9: Status 404 returned error can't find the container with id 474ebf0335eb2a4dcc088a39f8b2e211c7cc4a9afe8e7d7640aa94f96c9143f9 Jan 28 15:31:32 crc kubenswrapper[4959]: I0128 15:31:32.031266 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-84w8q" event={"ID":"1924ef1e-9d1d-407b-a368-885c1f5eb373","Type":"ContainerStarted","Data":"a09473905f19578cabb99332ca5cf8346bc1a28ab6203ba761d6a807c0142f8e"} Jan 28 15:31:32 crc kubenswrapper[4959]: I0128 15:31:32.031755 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-84w8q" event={"ID":"1924ef1e-9d1d-407b-a368-885c1f5eb373","Type":"ContainerStarted","Data":"474ebf0335eb2a4dcc088a39f8b2e211c7cc4a9afe8e7d7640aa94f96c9143f9"} Jan 28 15:31:33 crc kubenswrapper[4959]: I0128 15:31:33.039932 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-84w8q" event={"ID":"1924ef1e-9d1d-407b-a368-885c1f5eb373","Type":"ContainerStarted","Data":"121db2be662e6a89313d6068e02e4b39bb881464e7c0e9bc6a651a5ec4f76cd3"} Jan 28 15:31:33 crc kubenswrapper[4959]: I0128 15:31:33.040125 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-84w8q" Jan 28 15:31:33 crc kubenswrapper[4959]: I0128 15:31:33.063145 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-84w8q" podStartSLOduration=6.063124331 podStartE2EDuration="6.063124331s" podCreationTimestamp="2026-01-28 15:31:27 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:31:33.058170143 +0000 UTC m=+876.504076546" watchObservedRunningTime="2026-01-28 15:31:33.063124331 +0000 UTC m=+876.509030724" Jan 28 15:31:38 crc kubenswrapper[4959]: I0128 15:31:38.076017 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-2fnlx" Jan 28 15:31:38 crc kubenswrapper[4959]: I0128 15:31:38.088573 4959 generic.go:334] "Generic (PLEG): container finished" podID="0e4cb52a-0e2e-4c10-bd94-ef235dadad01" containerID="1f2d886db284bad2b61c04085640eb4ff6f2e1076340317e5365a26ce5c00357" exitCode=0 Jan 28 15:31:38 crc kubenswrapper[4959]: I0128 15:31:38.088632 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerDied","Data":"1f2d886db284bad2b61c04085640eb4ff6f2e1076340317e5365a26ce5c00357"} Jan 28 15:31:39 crc kubenswrapper[4959]: I0128 15:31:39.096483 4959 generic.go:334] "Generic (PLEG): container finished" podID="0e4cb52a-0e2e-4c10-bd94-ef235dadad01" containerID="bee04f4c9928ce47e76f0c46ca4ec6d0e6dfcf727c3bf76dc6a97f1dd8a0516a" exitCode=0 Jan 28 15:31:39 crc kubenswrapper[4959]: I0128 15:31:39.096536 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerDied","Data":"bee04f4c9928ce47e76f0c46ca4ec6d0e6dfcf727c3bf76dc6a97f1dd8a0516a"} Jan 28 15:31:40 crc kubenswrapper[4959]: I0128 15:31:40.106449 4959 generic.go:334] "Generic (PLEG): container finished" podID="0e4cb52a-0e2e-4c10-bd94-ef235dadad01" containerID="d508bb04a698ec09337a7fdd555afb3939f8306d49edef922ec82c18e4534894" exitCode=0 Jan 28 15:31:40 crc kubenswrapper[4959]: I0128 15:31:40.106550 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerDied","Data":"d508bb04a698ec09337a7fdd555afb3939f8306d49edef922ec82c18e4534894"} Jan 28 15:31:40 crc kubenswrapper[4959]: I0128 15:31:40.109051 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" event={"ID":"49fb622a-9b72-4a64-9d53-31b4871d1fe2","Type":"ContainerStarted","Data":"25eb8af56665c9addc2467ec5256d4494d0d9e19b9c00ed78b261527fb353232"} Jan 28 15:31:40 crc kubenswrapper[4959]: I0128 15:31:40.109554 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:31:40 crc kubenswrapper[4959]: I0128 15:31:40.147711 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" podStartSLOduration=2.753177694 podStartE2EDuration="14.147689347s" podCreationTimestamp="2026-01-28 15:31:26 +0000 UTC" firstStartedPulling="2026-01-28 15:31:28.014064679 +0000 UTC m=+871.459971062" lastFinishedPulling="2026-01-28 15:31:39.408576332 +0000 UTC m=+882.854482715" observedRunningTime="2026-01-28 15:31:40.145050924 +0000 UTC m=+883.590957307" watchObservedRunningTime="2026-01-28 15:31:40.147689347 +0000 UTC m=+883.593595730" Jan 28 15:31:41 crc kubenswrapper[4959]: I0128 15:31:41.122167 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" 
event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerStarted","Data":"d3d297d5147560243b97a344bf54bbbb400203807163bc41378159fb78d636f4"} Jan 28 15:31:41 crc kubenswrapper[4959]: I0128 15:31:41.122597 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerStarted","Data":"b903691d223849c951c36407d6a98979b3a6d87f05b290cffa5260efbf906278"} Jan 28 15:31:41 crc kubenswrapper[4959]: I0128 15:31:41.122613 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerStarted","Data":"24c338331a3890094fcee21ce75113e31574c82756f84efa0b718690a2b99d25"} Jan 28 15:31:41 crc kubenswrapper[4959]: I0128 15:31:41.122623 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerStarted","Data":"a46079d7fade0f03a4f68d7519a6794ab547d8c8d2484a294918d1ec0893c854"} Jan 28 15:31:41 crc kubenswrapper[4959]: I0128 15:31:41.340735 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-84w8q" Jan 28 15:31:42 crc kubenswrapper[4959]: I0128 15:31:42.136585 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerStarted","Data":"6ebd2476aef3d2bdfecc4101507b97c98129ad2ffbc93908719ac5715b10ffe7"} Jan 28 15:31:42 crc kubenswrapper[4959]: I0128 15:31:42.137231 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:42 crc kubenswrapper[4959]: I0128 15:31:42.137292 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-bzd7h" event={"ID":"0e4cb52a-0e2e-4c10-bd94-ef235dadad01","Type":"ContainerStarted","Data":"5238f5a88a1fa0ce305d085c95c01164cbdbbfa6951d294cafd54ba31de9f7ed"} Jan 28 15:31:42 crc kubenswrapper[4959]: I0128 15:31:42.164071 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-bzd7h" podStartSLOduration=7.922778055 podStartE2EDuration="16.164045469s" podCreationTimestamp="2026-01-28 15:31:26 +0000 UTC" firstStartedPulling="2026-01-28 15:31:28.813797496 +0000 UTC m=+872.259703879" lastFinishedPulling="2026-01-28 15:31:37.05506486 +0000 UTC m=+880.500971293" observedRunningTime="2026-01-28 15:31:42.160095295 +0000 UTC m=+885.606001698" watchObservedRunningTime="2026-01-28 15:31:42.164045469 +0000 UTC m=+885.609951852" Jan 28 15:31:42 crc kubenswrapper[4959]: I0128 15:31:42.328918 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:42 crc kubenswrapper[4959]: I0128 15:31:42.372498 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:47 crc kubenswrapper[4959]: I0128 15:31:47.834406 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-czpn5"] Jan 28 15:31:47 crc kubenswrapper[4959]: I0128 15:31:47.836623 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-czpn5" Jan 28 15:31:47 crc kubenswrapper[4959]: I0128 15:31:47.839716 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 28 15:31:47 crc kubenswrapper[4959]: I0128 15:31:47.840163 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-ttbv7" Jan 28 15:31:47 crc kubenswrapper[4959]: I0128 15:31:47.840895 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-czpn5"] Jan 28 15:31:47 crc kubenswrapper[4959]: I0128 15:31:47.842837 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 28 15:31:47 crc kubenswrapper[4959]: I0128 15:31:47.964141 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh7ff\" (UniqueName: \"kubernetes.io/projected/f5e07243-6723-4bf1-9c42-7c3b470e100b-kube-api-access-mh7ff\") pod \"openstack-operator-index-czpn5\" (UID: \"f5e07243-6723-4bf1-9c42-7c3b470e100b\") " pod="openstack-operators/openstack-operator-index-czpn5" Jan 28 15:31:48 crc kubenswrapper[4959]: I0128 15:31:48.065988 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh7ff\" (UniqueName: \"kubernetes.io/projected/f5e07243-6723-4bf1-9c42-7c3b470e100b-kube-api-access-mh7ff\") pod \"openstack-operator-index-czpn5\" (UID: \"f5e07243-6723-4bf1-9c42-7c3b470e100b\") " pod="openstack-operators/openstack-operator-index-czpn5" Jan 28 15:31:48 crc kubenswrapper[4959]: I0128 15:31:48.086305 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh7ff\" (UniqueName: \"kubernetes.io/projected/f5e07243-6723-4bf1-9c42-7c3b470e100b-kube-api-access-mh7ff\") pod \"openstack-operator-index-czpn5\" (UID: \"f5e07243-6723-4bf1-9c42-7c3b470e100b\") " pod="openstack-operators/openstack-operator-index-czpn5" Jan 28 15:31:48 crc kubenswrapper[4959]: I0128 15:31:48.153267 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-czpn5" Jan 28 15:31:48 crc kubenswrapper[4959]: I0128 15:31:48.704446 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-czpn5"] Jan 28 15:31:49 crc kubenswrapper[4959]: I0128 15:31:49.213481 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-czpn5" event={"ID":"f5e07243-6723-4bf1-9c42-7c3b470e100b","Type":"ContainerStarted","Data":"79363ea06415c316d50c8dbc571633e9bd6b7ece93b5f81cb5fca373a6f46860"} Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.020482 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-czpn5"] Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.247018 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-czpn5" event={"ID":"f5e07243-6723-4bf1-9c42-7c3b470e100b","Type":"ContainerStarted","Data":"718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb"} Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.264557 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-czpn5" podStartSLOduration=2.58465435 podStartE2EDuration="6.264532202s" podCreationTimestamp="2026-01-28 15:31:47 +0000 UTC" firstStartedPulling="2026-01-28 15:31:48.720348136 +0000 UTC m=+892.166254519" lastFinishedPulling="2026-01-28 15:31:52.400225988 +0000 UTC m=+895.846132371" observedRunningTime="2026-01-28 15:31:53.261992072 +0000 UTC m=+896.707898465" watchObservedRunningTime="2026-01-28 15:31:53.264532202 +0000 UTC m=+896.710438585" Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.633647 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-6t6ww"] Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.635202 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.646072 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6t6ww"] Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.740621 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dl9wg\" (UniqueName: \"kubernetes.io/projected/9131b387-d052-4f47-9c63-251dafdaf37f-kube-api-access-dl9wg\") pod \"openstack-operator-index-6t6ww\" (UID: \"9131b387-d052-4f47-9c63-251dafdaf37f\") " pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.843295 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dl9wg\" (UniqueName: \"kubernetes.io/projected/9131b387-d052-4f47-9c63-251dafdaf37f-kube-api-access-dl9wg\") pod \"openstack-operator-index-6t6ww\" (UID: \"9131b387-d052-4f47-9c63-251dafdaf37f\") " pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.875880 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dl9wg\" (UniqueName: \"kubernetes.io/projected/9131b387-d052-4f47-9c63-251dafdaf37f-kube-api-access-dl9wg\") pod \"openstack-operator-index-6t6ww\" (UID: \"9131b387-d052-4f47-9c63-251dafdaf37f\") " pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:31:53 crc kubenswrapper[4959]: I0128 15:31:53.966463 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:31:54 crc kubenswrapper[4959]: I0128 15:31:54.256945 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-czpn5" podUID="f5e07243-6723-4bf1-9c42-7c3b470e100b" containerName="registry-server" containerID="cri-o://718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb" gracePeriod=2 Jan 28 15:31:54 crc kubenswrapper[4959]: I0128 15:31:54.260581 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-6t6ww"] Jan 28 15:31:54 crc kubenswrapper[4959]: W0128 15:31:54.279459 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9131b387_d052_4f47_9c63_251dafdaf37f.slice/crio-43bea25df6fa5f6764e69225e9a8d8fe99b03aea6d80899980b330bbcb6ccccf WatchSource:0}: Error finding container 43bea25df6fa5f6764e69225e9a8d8fe99b03aea6d80899980b330bbcb6ccccf: Status 404 returned error can't find the container with id 43bea25df6fa5f6764e69225e9a8d8fe99b03aea6d80899980b330bbcb6ccccf Jan 28 15:31:54 crc kubenswrapper[4959]: I0128 15:31:54.649844 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-czpn5" Jan 28 15:31:54 crc kubenswrapper[4959]: I0128 15:31:54.758038 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh7ff\" (UniqueName: \"kubernetes.io/projected/f5e07243-6723-4bf1-9c42-7c3b470e100b-kube-api-access-mh7ff\") pod \"f5e07243-6723-4bf1-9c42-7c3b470e100b\" (UID: \"f5e07243-6723-4bf1-9c42-7c3b470e100b\") " Jan 28 15:31:54 crc kubenswrapper[4959]: I0128 15:31:54.772410 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5e07243-6723-4bf1-9c42-7c3b470e100b-kube-api-access-mh7ff" (OuterVolumeSpecName: "kube-api-access-mh7ff") pod "f5e07243-6723-4bf1-9c42-7c3b470e100b" (UID: "f5e07243-6723-4bf1-9c42-7c3b470e100b"). InnerVolumeSpecName "kube-api-access-mh7ff". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:31:54 crc kubenswrapper[4959]: I0128 15:31:54.859975 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh7ff\" (UniqueName: \"kubernetes.io/projected/f5e07243-6723-4bf1-9c42-7c3b470e100b-kube-api-access-mh7ff\") on node \"crc\" DevicePath \"\"" Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.267331 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6t6ww" event={"ID":"9131b387-d052-4f47-9c63-251dafdaf37f","Type":"ContainerStarted","Data":"4ad7b0d47f6b66ca5d83386ef9c37478064890f733056eb5b422de2fdbbfbd0e"} Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.267392 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-6t6ww" event={"ID":"9131b387-d052-4f47-9c63-251dafdaf37f","Type":"ContainerStarted","Data":"43bea25df6fa5f6764e69225e9a8d8fe99b03aea6d80899980b330bbcb6ccccf"} Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.270229 4959 generic.go:334] "Generic (PLEG): container finished" podID="f5e07243-6723-4bf1-9c42-7c3b470e100b" containerID="718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb" exitCode=0 Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.270281 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-czpn5" event={"ID":"f5e07243-6723-4bf1-9c42-7c3b470e100b","Type":"ContainerDied","Data":"718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb"} Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.270311 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-czpn5" event={"ID":"f5e07243-6723-4bf1-9c42-7c3b470e100b","Type":"ContainerDied","Data":"79363ea06415c316d50c8dbc571633e9bd6b7ece93b5f81cb5fca373a6f46860"} Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.270336 4959 scope.go:117] "RemoveContainer" containerID="718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb" Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.270481 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-czpn5" Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.283816 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-6t6ww" podStartSLOduration=2.2402672040000002 podStartE2EDuration="2.283794303s" podCreationTimestamp="2026-01-28 15:31:53 +0000 UTC" firstStartedPulling="2026-01-28 15:31:54.28292634 +0000 UTC m=+897.728832723" lastFinishedPulling="2026-01-28 15:31:54.326453439 +0000 UTC m=+897.772359822" observedRunningTime="2026-01-28 15:31:55.283472146 +0000 UTC m=+898.729378529" watchObservedRunningTime="2026-01-28 15:31:55.283794303 +0000 UTC m=+898.729700686" Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.298959 4959 scope.go:117] "RemoveContainer" containerID="718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb" Jan 28 15:31:55 crc kubenswrapper[4959]: E0128 15:31:55.302872 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb\": container with ID starting with 718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb not found: ID does not exist" containerID="718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb" Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.302917 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb"} err="failed to get container status \"718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb\": rpc error: code = NotFound desc = could not find container \"718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb\": container with ID starting with 718238e2e391f7d4dae7de36d8b0d9ff5f8f04c5c3ffa2ec9b1f9133884f40cb not found: ID does not exist" Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.309332 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-czpn5"] Jan 28 15:31:55 crc kubenswrapper[4959]: I0128 15:31:55.312998 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-czpn5"] Jan 28 15:31:56 crc kubenswrapper[4959]: I0128 15:31:56.612759 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5e07243-6723-4bf1-9c42-7c3b470e100b" path="/var/lib/kubelet/pods/f5e07243-6723-4bf1-9c42-7c3b470e100b/volumes" Jan 28 15:31:57 crc kubenswrapper[4959]: I0128 15:31:57.332987 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-bzd7h" Jan 28 15:31:57 crc kubenswrapper[4959]: I0128 15:31:57.395417 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rdt2h" Jan 28 15:32:03 crc kubenswrapper[4959]: I0128 15:32:03.967935 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:32:03 crc kubenswrapper[4959]: I0128 15:32:03.969030 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:32:04 crc kubenswrapper[4959]: I0128 15:32:04.002467 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:32:04 crc kubenswrapper[4959]: 
I0128 15:32:04.359725 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-6t6ww" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.639679 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xbbnj"] Jan 28 15:32:15 crc kubenswrapper[4959]: E0128 15:32:15.640908 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5e07243-6723-4bf1-9c42-7c3b470e100b" containerName="registry-server" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.640926 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5e07243-6723-4bf1-9c42-7c3b470e100b" containerName="registry-server" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.641124 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5e07243-6723-4bf1-9c42-7c3b470e100b" containerName="registry-server" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.642293 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.661888 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xbbnj"] Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.676292 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-catalog-content\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.676371 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4bcb\" (UniqueName: \"kubernetes.io/projected/a26ca325-a168-4f02-9c01-43b7a90ab37d-kube-api-access-g4bcb\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.677247 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-utilities\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.779381 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-utilities\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.779449 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-catalog-content\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.779480 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4bcb\" (UniqueName: 
\"kubernetes.io/projected/a26ca325-a168-4f02-9c01-43b7a90ab37d-kube-api-access-g4bcb\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.780387 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-catalog-content\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.780482 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-utilities\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.806152 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4bcb\" (UniqueName: \"kubernetes.io/projected/a26ca325-a168-4f02-9c01-43b7a90ab37d-kube-api-access-g4bcb\") pod \"community-operators-xbbnj\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:15 crc kubenswrapper[4959]: I0128 15:32:15.965432 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:16 crc kubenswrapper[4959]: I0128 15:32:16.312996 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xbbnj"] Jan 28 15:32:16 crc kubenswrapper[4959]: W0128 15:32:16.324384 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda26ca325_a168_4f02_9c01_43b7a90ab37d.slice/crio-f9fe11e382d60667746b4993959ee6c788ed54ac672166bfa302bb39fe0c7aed WatchSource:0}: Error finding container f9fe11e382d60667746b4993959ee6c788ed54ac672166bfa302bb39fe0c7aed: Status 404 returned error can't find the container with id f9fe11e382d60667746b4993959ee6c788ed54ac672166bfa302bb39fe0c7aed Jan 28 15:32:16 crc kubenswrapper[4959]: I0128 15:32:16.413463 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbbnj" event={"ID":"a26ca325-a168-4f02-9c01-43b7a90ab37d","Type":"ContainerStarted","Data":"f9fe11e382d60667746b4993959ee6c788ed54ac672166bfa302bb39fe0c7aed"} Jan 28 15:32:17 crc kubenswrapper[4959]: I0128 15:32:17.423104 4959 generic.go:334] "Generic (PLEG): container finished" podID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerID="431026795d15f718746188ba11ad6b898c1af999dda75b7d468bee03f178b14e" exitCode=0 Jan 28 15:32:17 crc kubenswrapper[4959]: I0128 15:32:17.423225 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbbnj" event={"ID":"a26ca325-a168-4f02-9c01-43b7a90ab37d","Type":"ContainerDied","Data":"431026795d15f718746188ba11ad6b898c1af999dda75b7d468bee03f178b14e"} Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.269949 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45"] Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.271345 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.283636 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45"] Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.289982 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-8pg8q" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.317045 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-bundle\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.317148 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdsx7\" (UniqueName: \"kubernetes.io/projected/db003231-aaa9-4d1d-9d56-046741bb4b32-kube-api-access-tdsx7\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.317420 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-util\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.419574 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-util\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.419677 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-bundle\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.419709 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdsx7\" (UniqueName: \"kubernetes.io/projected/db003231-aaa9-4d1d-9d56-046741bb4b32-kube-api-access-tdsx7\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.420267 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-util\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.420383 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-bundle\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.436508 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbbnj" event={"ID":"a26ca325-a168-4f02-9c01-43b7a90ab37d","Type":"ContainerStarted","Data":"6ee771fc347b44de8b7641ef9d9a065ff7a8bf653d095c84863aa3f6bbab0b46"} Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.456588 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdsx7\" (UniqueName: \"kubernetes.io/projected/db003231-aaa9-4d1d-9d56-046741bb4b32-kube-api-access-tdsx7\") pod \"ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:18 crc kubenswrapper[4959]: I0128 15:32:18.601040 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:19 crc kubenswrapper[4959]: I0128 15:32:19.009469 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45"] Jan 28 15:32:19 crc kubenswrapper[4959]: W0128 15:32:19.018501 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb003231_aaa9_4d1d_9d56_046741bb4b32.slice/crio-8886d1b2004fe737a0694653f4529f321c73115661d8ddc2e1aed3e6d7fd7b4a WatchSource:0}: Error finding container 8886d1b2004fe737a0694653f4529f321c73115661d8ddc2e1aed3e6d7fd7b4a: Status 404 returned error can't find the container with id 8886d1b2004fe737a0694653f4529f321c73115661d8ddc2e1aed3e6d7fd7b4a Jan 28 15:32:19 crc kubenswrapper[4959]: I0128 15:32:19.443629 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" event={"ID":"db003231-aaa9-4d1d-9d56-046741bb4b32","Type":"ContainerStarted","Data":"8886d1b2004fe737a0694653f4529f321c73115661d8ddc2e1aed3e6d7fd7b4a"} Jan 28 15:32:19 crc kubenswrapper[4959]: I0128 15:32:19.445509 4959 generic.go:334] "Generic (PLEG): container finished" podID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerID="6ee771fc347b44de8b7641ef9d9a065ff7a8bf653d095c84863aa3f6bbab0b46" exitCode=0 Jan 28 15:32:19 crc kubenswrapper[4959]: I0128 15:32:19.445560 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbbnj" event={"ID":"a26ca325-a168-4f02-9c01-43b7a90ab37d","Type":"ContainerDied","Data":"6ee771fc347b44de8b7641ef9d9a065ff7a8bf653d095c84863aa3f6bbab0b46"} Jan 28 15:32:20 crc kubenswrapper[4959]: I0128 15:32:20.456569 4959 generic.go:334] "Generic (PLEG): container 
finished" podID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerID="22dc693458b81146519af08d81a4fd03e436627c22ca99b3476e478ce1980d73" exitCode=0 Jan 28 15:32:20 crc kubenswrapper[4959]: I0128 15:32:20.456629 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" event={"ID":"db003231-aaa9-4d1d-9d56-046741bb4b32","Type":"ContainerDied","Data":"22dc693458b81146519af08d81a4fd03e436627c22ca99b3476e478ce1980d73"} Jan 28 15:32:20 crc kubenswrapper[4959]: I0128 15:32:20.461544 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbbnj" event={"ID":"a26ca325-a168-4f02-9c01-43b7a90ab37d","Type":"ContainerStarted","Data":"b467a95c11b4a82d7569473d0a2f41a2e969bc65c2c85edc80c7885c07fc9c4e"} Jan 28 15:32:20 crc kubenswrapper[4959]: I0128 15:32:20.501938 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xbbnj" podStartSLOduration=2.866288049 podStartE2EDuration="5.501905022s" podCreationTimestamp="2026-01-28 15:32:15 +0000 UTC" firstStartedPulling="2026-01-28 15:32:17.426817848 +0000 UTC m=+920.872724231" lastFinishedPulling="2026-01-28 15:32:20.062434821 +0000 UTC m=+923.508341204" observedRunningTime="2026-01-28 15:32:20.497494874 +0000 UTC m=+923.943401267" watchObservedRunningTime="2026-01-28 15:32:20.501905022 +0000 UTC m=+923.947811415" Jan 28 15:32:21 crc kubenswrapper[4959]: I0128 15:32:21.472019 4959 generic.go:334] "Generic (PLEG): container finished" podID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerID="8aaa3a6c4bea9e06b77934a86dd895c622b34e93a95286f09af84c6e66b34c67" exitCode=0 Jan 28 15:32:21 crc kubenswrapper[4959]: I0128 15:32:21.472161 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" event={"ID":"db003231-aaa9-4d1d-9d56-046741bb4b32","Type":"ContainerDied","Data":"8aaa3a6c4bea9e06b77934a86dd895c622b34e93a95286f09af84c6e66b34c67"} Jan 28 15:32:22 crc kubenswrapper[4959]: I0128 15:32:22.482677 4959 generic.go:334] "Generic (PLEG): container finished" podID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerID="556ea54e45fdd87e9128683ba1e0cee55291cfdf2af11d0e140bf9f74731ae77" exitCode=0 Jan 28 15:32:22 crc kubenswrapper[4959]: I0128 15:32:22.482730 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" event={"ID":"db003231-aaa9-4d1d-9d56-046741bb4b32","Type":"ContainerDied","Data":"556ea54e45fdd87e9128683ba1e0cee55291cfdf2af11d0e140bf9f74731ae77"} Jan 28 15:32:23 crc kubenswrapper[4959]: I0128 15:32:23.847653 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:23 crc kubenswrapper[4959]: I0128 15:32:23.925549 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-bundle\") pod \"db003231-aaa9-4d1d-9d56-046741bb4b32\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " Jan 28 15:32:23 crc kubenswrapper[4959]: I0128 15:32:23.926088 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-util\") pod \"db003231-aaa9-4d1d-9d56-046741bb4b32\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " Jan 28 15:32:23 crc kubenswrapper[4959]: I0128 15:32:23.926248 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdsx7\" (UniqueName: \"kubernetes.io/projected/db003231-aaa9-4d1d-9d56-046741bb4b32-kube-api-access-tdsx7\") pod \"db003231-aaa9-4d1d-9d56-046741bb4b32\" (UID: \"db003231-aaa9-4d1d-9d56-046741bb4b32\") " Jan 28 15:32:23 crc kubenswrapper[4959]: I0128 15:32:23.927259 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-bundle" (OuterVolumeSpecName: "bundle") pod "db003231-aaa9-4d1d-9d56-046741bb4b32" (UID: "db003231-aaa9-4d1d-9d56-046741bb4b32"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:32:23 crc kubenswrapper[4959]: I0128 15:32:23.934275 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db003231-aaa9-4d1d-9d56-046741bb4b32-kube-api-access-tdsx7" (OuterVolumeSpecName: "kube-api-access-tdsx7") pod "db003231-aaa9-4d1d-9d56-046741bb4b32" (UID: "db003231-aaa9-4d1d-9d56-046741bb4b32"). InnerVolumeSpecName "kube-api-access-tdsx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:32:23 crc kubenswrapper[4959]: I0128 15:32:23.942738 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-util" (OuterVolumeSpecName: "util") pod "db003231-aaa9-4d1d-9d56-046741bb4b32" (UID: "db003231-aaa9-4d1d-9d56-046741bb4b32"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:32:24 crc kubenswrapper[4959]: I0128 15:32:24.028335 4959 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:32:24 crc kubenswrapper[4959]: I0128 15:32:24.028375 4959 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/db003231-aaa9-4d1d-9d56-046741bb4b32-util\") on node \"crc\" DevicePath \"\"" Jan 28 15:32:24 crc kubenswrapper[4959]: I0128 15:32:24.028393 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdsx7\" (UniqueName: \"kubernetes.io/projected/db003231-aaa9-4d1d-9d56-046741bb4b32-kube-api-access-tdsx7\") on node \"crc\" DevicePath \"\"" Jan 28 15:32:24 crc kubenswrapper[4959]: I0128 15:32:24.511591 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" event={"ID":"db003231-aaa9-4d1d-9d56-046741bb4b32","Type":"ContainerDied","Data":"8886d1b2004fe737a0694653f4529f321c73115661d8ddc2e1aed3e6d7fd7b4a"} Jan 28 15:32:24 crc kubenswrapper[4959]: I0128 15:32:24.511640 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8886d1b2004fe737a0694653f4529f321c73115661d8ddc2e1aed3e6d7fd7b4a" Jan 28 15:32:24 crc kubenswrapper[4959]: I0128 15:32:24.511760 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45" Jan 28 15:32:25 crc kubenswrapper[4959]: I0128 15:32:25.965757 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:25 crc kubenswrapper[4959]: I0128 15:32:25.966419 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:26 crc kubenswrapper[4959]: I0128 15:32:26.010559 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:26 crc kubenswrapper[4959]: I0128 15:32:26.569699 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:29 crc kubenswrapper[4959]: I0128 15:32:29.408164 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xbbnj"] Jan 28 15:32:29 crc kubenswrapper[4959]: I0128 15:32:29.408454 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xbbnj" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerName="registry-server" containerID="cri-o://b467a95c11b4a82d7569473d0a2f41a2e969bc65c2c85edc80c7885c07fc9c4e" gracePeriod=2 Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.554676 4959 generic.go:334] "Generic (PLEG): container finished" podID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerID="b467a95c11b4a82d7569473d0a2f41a2e969bc65c2c85edc80c7885c07fc9c4e" exitCode=0 Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.554732 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbbnj" event={"ID":"a26ca325-a168-4f02-9c01-43b7a90ab37d","Type":"ContainerDied","Data":"b467a95c11b4a82d7569473d0a2f41a2e969bc65c2c85edc80c7885c07fc9c4e"} Jan 28 15:32:30 crc 
kubenswrapper[4959]: I0128 15:32:30.880956 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx"] Jan 28 15:32:30 crc kubenswrapper[4959]: E0128 15:32:30.881341 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerName="pull" Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.881358 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerName="pull" Jan 28 15:32:30 crc kubenswrapper[4959]: E0128 15:32:30.881373 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerName="extract" Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.881382 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerName="extract" Jan 28 15:32:30 crc kubenswrapper[4959]: E0128 15:32:30.881398 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerName="util" Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.881406 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerName="util" Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.881539 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="db003231-aaa9-4d1d-9d56-046741bb4b32" containerName="extract" Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.882177 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.888557 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-t5qzx" Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.910798 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx"] Jan 28 15:32:30 crc kubenswrapper[4959]: I0128 15:32:30.933600 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc4rr\" (UniqueName: \"kubernetes.io/projected/6d73f93c-c6ca-4f91-828e-ed160e528063-kube-api-access-cc4rr\") pod \"openstack-operator-controller-init-678d9cfb88-q22tx\" (UID: \"6d73f93c-c6ca-4f91-828e-ed160e528063\") " pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.035502 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc4rr\" (UniqueName: \"kubernetes.io/projected/6d73f93c-c6ca-4f91-828e-ed160e528063-kube-api-access-cc4rr\") pod \"openstack-operator-controller-init-678d9cfb88-q22tx\" (UID: \"6d73f93c-c6ca-4f91-828e-ed160e528063\") " pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.083958 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc4rr\" (UniqueName: \"kubernetes.io/projected/6d73f93c-c6ca-4f91-828e-ed160e528063-kube-api-access-cc4rr\") pod \"openstack-operator-controller-init-678d9cfb88-q22tx\" (UID: \"6d73f93c-c6ca-4f91-828e-ed160e528063\") " pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 
15:32:31.201625 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.515671 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx"] Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.565471 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" event={"ID":"6d73f93c-c6ca-4f91-828e-ed160e528063","Type":"ContainerStarted","Data":"fc5f4af61a7ca820511450ec062a3d6ca6c358709fbf20ba9e24c965b978cf1b"} Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.692646 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.750082 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4bcb\" (UniqueName: \"kubernetes.io/projected/a26ca325-a168-4f02-9c01-43b7a90ab37d-kube-api-access-g4bcb\") pod \"a26ca325-a168-4f02-9c01-43b7a90ab37d\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.750162 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-utilities\") pod \"a26ca325-a168-4f02-9c01-43b7a90ab37d\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.750273 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-catalog-content\") pod \"a26ca325-a168-4f02-9c01-43b7a90ab37d\" (UID: \"a26ca325-a168-4f02-9c01-43b7a90ab37d\") " Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.751507 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-utilities" (OuterVolumeSpecName: "utilities") pod "a26ca325-a168-4f02-9c01-43b7a90ab37d" (UID: "a26ca325-a168-4f02-9c01-43b7a90ab37d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.757220 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a26ca325-a168-4f02-9c01-43b7a90ab37d-kube-api-access-g4bcb" (OuterVolumeSpecName: "kube-api-access-g4bcb") pod "a26ca325-a168-4f02-9c01-43b7a90ab37d" (UID: "a26ca325-a168-4f02-9c01-43b7a90ab37d"). InnerVolumeSpecName "kube-api-access-g4bcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.806447 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a26ca325-a168-4f02-9c01-43b7a90ab37d" (UID: "a26ca325-a168-4f02-9c01-43b7a90ab37d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.851772 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.851824 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4bcb\" (UniqueName: \"kubernetes.io/projected/a26ca325-a168-4f02-9c01-43b7a90ab37d-kube-api-access-g4bcb\") on node \"crc\" DevicePath \"\"" Jan 28 15:32:31 crc kubenswrapper[4959]: I0128 15:32:31.851839 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a26ca325-a168-4f02-9c01-43b7a90ab37d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:32:32 crc kubenswrapper[4959]: I0128 15:32:32.577640 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbbnj" event={"ID":"a26ca325-a168-4f02-9c01-43b7a90ab37d","Type":"ContainerDied","Data":"f9fe11e382d60667746b4993959ee6c788ed54ac672166bfa302bb39fe0c7aed"} Jan 28 15:32:32 crc kubenswrapper[4959]: I0128 15:32:32.577738 4959 scope.go:117] "RemoveContainer" containerID="b467a95c11b4a82d7569473d0a2f41a2e969bc65c2c85edc80c7885c07fc9c4e" Jan 28 15:32:32 crc kubenswrapper[4959]: I0128 15:32:32.577992 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbbnj" Jan 28 15:32:32 crc kubenswrapper[4959]: I0128 15:32:32.631881 4959 scope.go:117] "RemoveContainer" containerID="6ee771fc347b44de8b7641ef9d9a065ff7a8bf653d095c84863aa3f6bbab0b46" Jan 28 15:32:32 crc kubenswrapper[4959]: I0128 15:32:32.635332 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xbbnj"] Jan 28 15:32:32 crc kubenswrapper[4959]: I0128 15:32:32.645953 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xbbnj"] Jan 28 15:32:32 crc kubenswrapper[4959]: I0128 15:32:32.657676 4959 scope.go:117] "RemoveContainer" containerID="431026795d15f718746188ba11ad6b898c1af999dda75b7d468bee03f178b14e" Jan 28 15:32:34 crc kubenswrapper[4959]: I0128 15:32:34.594691 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" path="/var/lib/kubelet/pods/a26ca325-a168-4f02-9c01-43b7a90ab37d/volumes" Jan 28 15:32:38 crc kubenswrapper[4959]: I0128 15:32:38.633664 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" event={"ID":"6d73f93c-c6ca-4f91-828e-ed160e528063","Type":"ContainerStarted","Data":"dd43040d2eb2702a6be0f103f475e5ac449ed4c383ee6f3b9006021becfbdbe1"} Jan 28 15:32:38 crc kubenswrapper[4959]: I0128 15:32:38.634543 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" Jan 28 15:32:38 crc kubenswrapper[4959]: I0128 15:32:38.671869 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" podStartSLOduration=2.713826969 podStartE2EDuration="8.671841828s" podCreationTimestamp="2026-01-28 15:32:30 +0000 UTC" firstStartedPulling="2026-01-28 15:32:31.520360564 +0000 UTC m=+934.966266947" lastFinishedPulling="2026-01-28 15:32:37.478375423 +0000 UTC 
m=+940.924281806" observedRunningTime="2026-01-28 15:32:38.667326866 +0000 UTC m=+942.113233279" watchObservedRunningTime="2026-01-28 15:32:38.671841828 +0000 UTC m=+942.117748211" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.418610 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dmcb4"] Jan 28 15:32:40 crc kubenswrapper[4959]: E0128 15:32:40.418969 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerName="extract-content" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.419004 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerName="extract-content" Jan 28 15:32:40 crc kubenswrapper[4959]: E0128 15:32:40.419024 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerName="extract-utilities" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.419031 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerName="extract-utilities" Jan 28 15:32:40 crc kubenswrapper[4959]: E0128 15:32:40.419040 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerName="registry-server" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.419048 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerName="registry-server" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.419248 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="a26ca325-a168-4f02-9c01-43b7a90ab37d" containerName="registry-server" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.420410 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.430808 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmcb4"] Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.494928 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwn7g\" (UniqueName: \"kubernetes.io/projected/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-kube-api-access-bwn7g\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.495128 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-catalog-content\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.495164 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-utilities\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.596606 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-utilities\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.596678 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-catalog-content\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.596802 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwn7g\" (UniqueName: \"kubernetes.io/projected/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-kube-api-access-bwn7g\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.597220 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-utilities\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.597318 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-catalog-content\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.619981 4959 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bwn7g\" (UniqueName: \"kubernetes.io/projected/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-kube-api-access-bwn7g\") pod \"redhat-marketplace-dmcb4\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") " pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:40 crc kubenswrapper[4959]: I0128 15:32:40.741837 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:41 crc kubenswrapper[4959]: I0128 15:32:41.028373 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmcb4"] Jan 28 15:32:41 crc kubenswrapper[4959]: I0128 15:32:41.657229 4959 generic.go:334] "Generic (PLEG): container finished" podID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerID="f9a44e6abfd1e6c2a14f7bc9844fccd5e902cd530c7f009cb8eacb6954b1a600" exitCode=0 Jan 28 15:32:41 crc kubenswrapper[4959]: I0128 15:32:41.657358 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmcb4" event={"ID":"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6","Type":"ContainerDied","Data":"f9a44e6abfd1e6c2a14f7bc9844fccd5e902cd530c7f009cb8eacb6954b1a600"} Jan 28 15:32:41 crc kubenswrapper[4959]: I0128 15:32:41.658866 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmcb4" event={"ID":"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6","Type":"ContainerStarted","Data":"14cfcd61a24b0f960904808b9428ee6c375deda9f41a8ff653dad30e8ff642a3"} Jan 28 15:32:42 crc kubenswrapper[4959]: I0128 15:32:42.668204 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmcb4" event={"ID":"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6","Type":"ContainerStarted","Data":"9b8202f595d4971fdb047a2152e021c4de4e75f0d9eed1471f761b7706990d66"} Jan 28 15:32:43 crc kubenswrapper[4959]: I0128 15:32:43.678131 4959 generic.go:334] "Generic (PLEG): container finished" podID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerID="9b8202f595d4971fdb047a2152e021c4de4e75f0d9eed1471f761b7706990d66" exitCode=0 Jan 28 15:32:43 crc kubenswrapper[4959]: I0128 15:32:43.678240 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmcb4" event={"ID":"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6","Type":"ContainerDied","Data":"9b8202f595d4971fdb047a2152e021c4de4e75f0d9eed1471f761b7706990d66"} Jan 28 15:32:44 crc kubenswrapper[4959]: I0128 15:32:44.690978 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmcb4" event={"ID":"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6","Type":"ContainerStarted","Data":"318da4d729bbd57caae2d80870f34085ab6be46354c638a3720fab7255852552"} Jan 28 15:32:44 crc kubenswrapper[4959]: I0128 15:32:44.715199 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dmcb4" podStartSLOduration=2.054658917 podStartE2EDuration="4.715176412s" podCreationTimestamp="2026-01-28 15:32:40 +0000 UTC" firstStartedPulling="2026-01-28 15:32:41.659174706 +0000 UTC m=+945.105081089" lastFinishedPulling="2026-01-28 15:32:44.319692211 +0000 UTC m=+947.765598584" observedRunningTime="2026-01-28 15:32:44.710774774 +0000 UTC m=+948.156681167" watchObservedRunningTime="2026-01-28 15:32:44.715176412 +0000 UTC m=+948.161082795" Jan 28 15:32:50 crc kubenswrapper[4959]: I0128 15:32:50.742495 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:50 crc kubenswrapper[4959]: I0128 15:32:50.743076 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:50 crc kubenswrapper[4959]: I0128 15:32:50.786643 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:51 crc kubenswrapper[4959]: I0128 15:32:51.204345 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-678d9cfb88-q22tx" Jan 28 15:32:51 crc kubenswrapper[4959]: I0128 15:32:51.781875 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dmcb4" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.221179 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tdzqm"] Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.222667 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.241484 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tdzqm"] Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.383857 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4lsk\" (UniqueName: \"kubernetes.io/projected/f767a492-d6c1-4e3e-b129-94f3ca1069de-kube-api-access-v4lsk\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.384036 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-utilities\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.384276 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-catalog-content\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.486310 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-utilities\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.486384 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-catalog-content\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.486408 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-v4lsk\" (UniqueName: \"kubernetes.io/projected/f767a492-d6c1-4e3e-b129-94f3ca1069de-kube-api-access-v4lsk\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.487499 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-utilities\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.487556 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-catalog-content\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.518297 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4lsk\" (UniqueName: \"kubernetes.io/projected/f767a492-d6c1-4e3e-b129-94f3ca1069de-kube-api-access-v4lsk\") pod \"certified-operators-tdzqm\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") " pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:52 crc kubenswrapper[4959]: I0128 15:32:52.542193 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:32:53 crc kubenswrapper[4959]: I0128 15:32:53.060281 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tdzqm"] Jan 28 15:32:53 crc kubenswrapper[4959]: I0128 15:32:53.774807 4959 generic.go:334] "Generic (PLEG): container finished" podID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerID="2bec34333ec8511f302275f4d8d6d58c3452108919573cd1994ef1547701d450" exitCode=0 Jan 28 15:32:53 crc kubenswrapper[4959]: I0128 15:32:53.775330 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tdzqm" event={"ID":"f767a492-d6c1-4e3e-b129-94f3ca1069de","Type":"ContainerDied","Data":"2bec34333ec8511f302275f4d8d6d58c3452108919573cd1994ef1547701d450"} Jan 28 15:32:53 crc kubenswrapper[4959]: I0128 15:32:53.775370 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tdzqm" event={"ID":"f767a492-d6c1-4e3e-b129-94f3ca1069de","Type":"ContainerStarted","Data":"5a5909a9ca4c636657f3fedbc7d1370ea77b281231749df3116fd1bfe99c07bb"} Jan 28 15:32:54 crc kubenswrapper[4959]: I0128 15:32:54.411454 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmcb4"] Jan 28 15:32:54 crc kubenswrapper[4959]: I0128 15:32:54.412267 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dmcb4" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerName="registry-server" containerID="cri-o://318da4d729bbd57caae2d80870f34085ab6be46354c638a3720fab7255852552" gracePeriod=2 Jan 28 15:32:54 crc kubenswrapper[4959]: I0128 15:32:54.790466 4959 generic.go:334] "Generic (PLEG): container finished" podID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerID="318da4d729bbd57caae2d80870f34085ab6be46354c638a3720fab7255852552" exitCode=0 Jan 28 15:32:54 crc kubenswrapper[4959]: I0128 
Jan 28 15:32:54 crc kubenswrapper[4959]: I0128 15:32:54.790583 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmcb4" event={"ID":"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6","Type":"ContainerDied","Data":"14cfcd61a24b0f960904808b9428ee6c375deda9f41a8ff653dad30e8ff642a3"}
Jan 28 15:32:54 crc kubenswrapper[4959]: I0128 15:32:54.790602 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14cfcd61a24b0f960904808b9428ee6c375deda9f41a8ff653dad30e8ff642a3"
Jan 28 15:32:54 crc kubenswrapper[4959]: I0128 15:32:54.856128 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmcb4"
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.036003 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-catalog-content\") pod \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") "
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.036532 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwn7g\" (UniqueName: \"kubernetes.io/projected/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-kube-api-access-bwn7g\") pod \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") "
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.036625 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-utilities\") pod \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\" (UID: \"2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6\") "
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.037559 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-utilities" (OuterVolumeSpecName: "utilities") pod "2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" (UID: "2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.045309 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-kube-api-access-bwn7g" (OuterVolumeSpecName: "kube-api-access-bwn7g") pod "2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" (UID: "2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6"). InnerVolumeSpecName "kube-api-access-bwn7g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.064687 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" (UID: "2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.138227 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.138272 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwn7g\" (UniqueName: \"kubernetes.io/projected/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-kube-api-access-bwn7g\") on node \"crc\" DevicePath \"\""
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.138287 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.799666 4959 generic.go:334] "Generic (PLEG): container finished" podID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerID="cafacc91ada2517e9c8a63b017ff131ec6492889a88bc53cfded7e89a09d58aa" exitCode=0
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.799776 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmcb4"
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.799791 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tdzqm" event={"ID":"f767a492-d6c1-4e3e-b129-94f3ca1069de","Type":"ContainerDied","Data":"cafacc91ada2517e9c8a63b017ff131ec6492889a88bc53cfded7e89a09d58aa"}
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.843784 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmcb4"]
Jan 28 15:32:55 crc kubenswrapper[4959]: I0128 15:32:55.849760 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmcb4"]
Jan 28 15:32:56 crc kubenswrapper[4959]: I0128 15:32:56.595595 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" path="/var/lib/kubelet/pods/2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6/volumes"
Jan 28 15:32:57 crc kubenswrapper[4959]: I0128 15:32:57.830288 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tdzqm" event={"ID":"f767a492-d6c1-4e3e-b129-94f3ca1069de","Type":"ContainerStarted","Data":"47140aa3a4d74658c0731312e031f517abfb4d52eca4f19c6a714c34d4fb773a"}
Jan 28 15:32:57 crc kubenswrapper[4959]: I0128 15:32:57.894355 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tdzqm" podStartSLOduration=2.500432074 podStartE2EDuration="5.894329236s" podCreationTimestamp="2026-01-28 15:32:52 +0000 UTC" firstStartedPulling="2026-01-28 15:32:53.777038882 +0000 UTC m=+957.222945265" lastFinishedPulling="2026-01-28 15:32:57.170936044 +0000 UTC m=+960.616842427" observedRunningTime="2026-01-28 15:32:57.894309965 +0000 UTC m=+961.340216348" watchObservedRunningTime="2026-01-28 15:32:57.894329236 +0000 UTC m=+961.340235629"
Jan 28 15:32:58 crc kubenswrapper[4959]: I0128 15:32:58.689694 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 15:32:58 crc kubenswrapper[4959]: I0128 15:32:58.689790 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 15:33:02 crc kubenswrapper[4959]: I0128 15:33:02.542386 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tdzqm"
Jan 28 15:33:02 crc kubenswrapper[4959]: I0128 15:33:02.542936 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tdzqm"
Jan 28 15:33:02 crc kubenswrapper[4959]: I0128 15:33:02.604738 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tdzqm"
Jan 28 15:33:02 crc kubenswrapper[4959]: I0128 15:33:02.932328 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tdzqm"
Jan 28 15:33:05 crc kubenswrapper[4959]: I0128 15:33:05.017622 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tdzqm"]
Jan 28 15:33:05 crc kubenswrapper[4959]: I0128 15:33:05.018749 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tdzqm" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerName="registry-server" containerID="cri-o://47140aa3a4d74658c0731312e031f517abfb4d52eca4f19c6a714c34d4fb773a" gracePeriod=2
Jan 28 15:33:05 crc kubenswrapper[4959]: I0128 15:33:05.890160 4959 generic.go:334] "Generic (PLEG): container finished" podID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerID="47140aa3a4d74658c0731312e031f517abfb4d52eca4f19c6a714c34d4fb773a" exitCode=0
Jan 28 15:33:05 crc kubenswrapper[4959]: I0128 15:33:05.890253 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tdzqm" event={"ID":"f767a492-d6c1-4e3e-b129-94f3ca1069de","Type":"ContainerDied","Data":"47140aa3a4d74658c0731312e031f517abfb4d52eca4f19c6a714c34d4fb773a"}
Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.321400 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tdzqm"
Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.426667 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4lsk\" (UniqueName: \"kubernetes.io/projected/f767a492-d6c1-4e3e-b129-94f3ca1069de-kube-api-access-v4lsk\") pod \"f767a492-d6c1-4e3e-b129-94f3ca1069de\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") "
Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.426759 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-utilities\") pod \"f767a492-d6c1-4e3e-b129-94f3ca1069de\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") "
Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.426803 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-catalog-content\") pod \"f767a492-d6c1-4e3e-b129-94f3ca1069de\" (UID: \"f767a492-d6c1-4e3e-b129-94f3ca1069de\") "
Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.427983 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-utilities" (OuterVolumeSpecName: "utilities") pod "f767a492-d6c1-4e3e-b129-94f3ca1069de" (UID: "f767a492-d6c1-4e3e-b129-94f3ca1069de"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.434620 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f767a492-d6c1-4e3e-b129-94f3ca1069de-kube-api-access-v4lsk" (OuterVolumeSpecName: "kube-api-access-v4lsk") pod "f767a492-d6c1-4e3e-b129-94f3ca1069de" (UID: "f767a492-d6c1-4e3e-b129-94f3ca1069de"). InnerVolumeSpecName "kube-api-access-v4lsk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.479876 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f767a492-d6c1-4e3e-b129-94f3ca1069de" (UID: "f767a492-d6c1-4e3e-b129-94f3ca1069de"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.528914 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4lsk\" (UniqueName: \"kubernetes.io/projected/f767a492-d6c1-4e3e-b129-94f3ca1069de-kube-api-access-v4lsk\") on node \"crc\" DevicePath \"\"" Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.528957 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.528968 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f767a492-d6c1-4e3e-b129-94f3ca1069de-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.901909 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tdzqm" event={"ID":"f767a492-d6c1-4e3e-b129-94f3ca1069de","Type":"ContainerDied","Data":"5a5909a9ca4c636657f3fedbc7d1370ea77b281231749df3116fd1bfe99c07bb"} Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.901998 4959 scope.go:117] "RemoveContainer" containerID="47140aa3a4d74658c0731312e031f517abfb4d52eca4f19c6a714c34d4fb773a" Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.901992 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tdzqm" Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.929117 4959 scope.go:117] "RemoveContainer" containerID="cafacc91ada2517e9c8a63b017ff131ec6492889a88bc53cfded7e89a09d58aa" Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.930224 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tdzqm"] Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.938677 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tdzqm"] Jan 28 15:33:06 crc kubenswrapper[4959]: I0128 15:33:06.952188 4959 scope.go:117] "RemoveContainer" containerID="2bec34333ec8511f302275f4d8d6d58c3452108919573cd1994ef1547701d450" Jan 28 15:33:08 crc kubenswrapper[4959]: I0128 15:33:08.596752 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" path="/var/lib/kubelet/pods/f767a492-d6c1-4e3e-b129-94f3ca1069de/volumes" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.327273 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz"] Jan 28 15:33:09 crc kubenswrapper[4959]: E0128 15:33:09.327760 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerName="extract-content" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.327788 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerName="extract-content" Jan 28 15:33:09 crc kubenswrapper[4959]: E0128 15:33:09.327815 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerName="registry-server" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.327827 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerName="registry-server" Jan 28 15:33:09 crc kubenswrapper[4959]: E0128 
15:33:09.327852 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerName="registry-server" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.327863 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerName="registry-server" Jan 28 15:33:09 crc kubenswrapper[4959]: E0128 15:33:09.327881 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerName="extract-utilities" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.327894 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerName="extract-utilities" Jan 28 15:33:09 crc kubenswrapper[4959]: E0128 15:33:09.327909 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerName="extract-utilities" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.327919 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerName="extract-utilities" Jan 28 15:33:09 crc kubenswrapper[4959]: E0128 15:33:09.327935 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerName="extract-content" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.327944 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerName="extract-content" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.328119 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f767a492-d6c1-4e3e-b129-94f3ca1069de" containerName="registry-server" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.328158 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a1c419d-aafd-4e9b-a0fe-cf9a35ccdea6" containerName="registry-server" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.328815 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.331181 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-748dh" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.335622 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.337288 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.339730 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-gj6hm" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.355269 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.373868 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.377035 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-tjb6f" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.417697 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.474584 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.478199 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tzpg\" (UniqueName: \"kubernetes.io/projected/174adb74-3ce7-4082-932c-4b8c00059fc7-kube-api-access-4tzpg\") pod \"barbican-operator-controller-manager-6bc7f4f4cf-kz6n7\" (UID: \"174adb74-3ce7-4082-932c-4b8c00059fc7\") " pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.478314 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qv222\" (UniqueName: \"kubernetes.io/projected/9234aa7e-f345-4186-bec5-e54051a95f1f-kube-api-access-qv222\") pod \"cinder-operator-controller-manager-f6487bd57-k75pz\" (UID: \"9234aa7e-f345-4186-bec5-e54051a95f1f\") " pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.478351 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtpbs\" (UniqueName: \"kubernetes.io/projected/6173ecaa-d123-4b57-8ffd-a1c19295b06f-kube-api-access-xtpbs\") pod \"designate-operator-controller-manager-66dfbd6f5d-zm9pn\" (UID: \"6173ecaa-d123-4b57-8ffd-a1c19295b06f\") " pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.490996 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.492087 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.497854 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-vhgxk" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.517219 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.525568 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.554875 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.556000 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.559716 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-hctsj" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.559945 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.561097 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.566229 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-958664b5-vcpch"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.566568 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-7mwvg" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.567841 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.571279 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-n7nj9" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.573044 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-958664b5-vcpch"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.582328 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.583875 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tzpg\" (UniqueName: \"kubernetes.io/projected/174adb74-3ce7-4082-932c-4b8c00059fc7-kube-api-access-4tzpg\") pod \"barbican-operator-controller-manager-6bc7f4f4cf-kz6n7\" (UID: \"174adb74-3ce7-4082-932c-4b8c00059fc7\") " pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.583969 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd6hz\" (UniqueName: \"kubernetes.io/projected/f957e48d-1d55-4d55-8059-693078a8db5f-kube-api-access-vd6hz\") pod \"glance-operator-controller-manager-6db5dbd896-d6t5g\" (UID: \"f957e48d-1d55-4d55-8059-693078a8db5f\") " pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.584029 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qv222\" (UniqueName: \"kubernetes.io/projected/9234aa7e-f345-4186-bec5-e54051a95f1f-kube-api-access-qv222\") pod \"cinder-operator-controller-manager-f6487bd57-k75pz\" (UID: \"9234aa7e-f345-4186-bec5-e54051a95f1f\") " pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.584065 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtpbs\" (UniqueName: 
\"kubernetes.io/projected/6173ecaa-d123-4b57-8ffd-a1c19295b06f-kube-api-access-xtpbs\") pod \"designate-operator-controller-manager-66dfbd6f5d-zm9pn\" (UID: \"6173ecaa-d123-4b57-8ffd-a1c19295b06f\") " pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.588050 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.600469 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.601966 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.608166 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-8bdhn" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.608370 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.613639 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.614993 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.627067 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-4mn72" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.632024 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.635437 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tzpg\" (UniqueName: \"kubernetes.io/projected/174adb74-3ce7-4082-932c-4b8c00059fc7-kube-api-access-4tzpg\") pod \"barbican-operator-controller-manager-6bc7f4f4cf-kz6n7\" (UID: \"174adb74-3ce7-4082-932c-4b8c00059fc7\") " pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.640847 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtpbs\" (UniqueName: \"kubernetes.io/projected/6173ecaa-d123-4b57-8ffd-a1c19295b06f-kube-api-access-xtpbs\") pod \"designate-operator-controller-manager-66dfbd6f5d-zm9pn\" (UID: \"6173ecaa-d123-4b57-8ffd-a1c19295b06f\") " pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.650934 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.655226 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qv222\" (UniqueName: \"kubernetes.io/projected/9234aa7e-f345-4186-bec5-e54051a95f1f-kube-api-access-qv222\") pod \"cinder-operator-controller-manager-f6487bd57-k75pz\" (UID: 
\"9234aa7e-f345-4186-bec5-e54051a95f1f\") " pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.664969 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-765668569f-qcwbm"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.665817 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-765668569f-qcwbm"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.665911 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.672396 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.673935 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-xv8t6" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.678030 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.681471 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.684200 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.684751 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-jlprd" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.687691 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.687744 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2qmb\" (UniqueName: \"kubernetes.io/projected/666e5c47-61d6-4fdf-bbaf-7aae03a06912-kube-api-access-h2qmb\") pod \"horizon-operator-controller-manager-5fb775575f-rcpd5\" (UID: \"666e5c47-61d6-4fdf-bbaf-7aae03a06912\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.687780 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2s9dn\" (UniqueName: \"kubernetes.io/projected/86f9260b-f92a-4dc1-9445-8bb3de058537-kube-api-access-2s9dn\") pod \"ironic-operator-controller-manager-958664b5-vcpch\" (UID: \"86f9260b-f92a-4dc1-9445-8bb3de058537\") " pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.687811 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brqd8\" 
(UniqueName: \"kubernetes.io/projected/08b2bcb6-f6da-4a40-95c6-c225a4c145fd-kube-api-access-brqd8\") pod \"keystone-operator-controller-manager-7b84b46695-wfxq5\" (UID: \"08b2bcb6-f6da-4a40-95c6-c225a4c145fd\") " pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.687839 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl5l8\" (UniqueName: \"kubernetes.io/projected/d3950d98-83ab-4ad0-b91c-cb838ae61278-kube-api-access-gl5l8\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.687862 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd6hz\" (UniqueName: \"kubernetes.io/projected/f957e48d-1d55-4d55-8059-693078a8db5f-kube-api-access-vd6hz\") pod \"glance-operator-controller-manager-6db5dbd896-d6t5g\" (UID: \"f957e48d-1d55-4d55-8059-693078a8db5f\") " pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.687887 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l5v9\" (UniqueName: \"kubernetes.io/projected/47665907-f1ce-43a1-a3b0-1510bd987a4f-kube-api-access-6l5v9\") pod \"heat-operator-controller-manager-587c6bfdcf-pd5tw\" (UID: \"47665907-f1ce-43a1-a3b0-1510bd987a4f\") " pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.688281 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.711463 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.731349 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.731770 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd6hz\" (UniqueName: \"kubernetes.io/projected/f957e48d-1d55-4d55-8059-693078a8db5f-kube-api-access-vd6hz\") pod \"glance-operator-controller-manager-6db5dbd896-d6t5g\" (UID: \"f957e48d-1d55-4d55-8059-693078a8db5f\") " pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.732763 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.745882 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-h2r2m" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.768202 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.769544 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.775859 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-wfjrv" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.796987 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.797074 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2qmb\" (UniqueName: \"kubernetes.io/projected/666e5c47-61d6-4fdf-bbaf-7aae03a06912-kube-api-access-h2qmb\") pod \"horizon-operator-controller-manager-5fb775575f-rcpd5\" (UID: \"666e5c47-61d6-4fdf-bbaf-7aae03a06912\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.797146 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2s9dn\" (UniqueName: \"kubernetes.io/projected/86f9260b-f92a-4dc1-9445-8bb3de058537-kube-api-access-2s9dn\") pod \"ironic-operator-controller-manager-958664b5-vcpch\" (UID: \"86f9260b-f92a-4dc1-9445-8bb3de058537\") " pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.797181 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rz9v\" (UniqueName: \"kubernetes.io/projected/edfcc729-f49c-4831-898c-448d88ae3236-kube-api-access-7rz9v\") pod \"mariadb-operator-controller-manager-67bf948998-zrxc4\" (UID: \"edfcc729-f49c-4831-898c-448d88ae3236\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.797223 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brqd8\" (UniqueName: \"kubernetes.io/projected/08b2bcb6-f6da-4a40-95c6-c225a4c145fd-kube-api-access-brqd8\") pod \"keystone-operator-controller-manager-7b84b46695-wfxq5\" (UID: \"08b2bcb6-f6da-4a40-95c6-c225a4c145fd\") " pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.797277 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl5l8\" (UniqueName: \"kubernetes.io/projected/d3950d98-83ab-4ad0-b91c-cb838ae61278-kube-api-access-gl5l8\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.797312 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l5v9\" (UniqueName: \"kubernetes.io/projected/47665907-f1ce-43a1-a3b0-1510bd987a4f-kube-api-access-6l5v9\") pod \"heat-operator-controller-manager-587c6bfdcf-pd5tw\" (UID: \"47665907-f1ce-43a1-a3b0-1510bd987a4f\") " pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 
15:33:09.797355 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69vfs\" (UniqueName: \"kubernetes.io/projected/d9f17334-8180-44ba-ab9d-28c3a24c56ee-kube-api-access-69vfs\") pod \"manila-operator-controller-manager-765668569f-qcwbm\" (UID: \"d9f17334-8180-44ba-ab9d-28c3a24c56ee\") " pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" Jan 28 15:33:09 crc kubenswrapper[4959]: E0128 15:33:09.797561 4959 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:09 crc kubenswrapper[4959]: E0128 15:33:09.797635 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert podName:d3950d98-83ab-4ad0-b91c-cb838ae61278 nodeName:}" failed. No retries permitted until 2026-01-28 15:33:10.297609962 +0000 UTC m=+973.743516345 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert") pod "infra-operator-controller-manager-79955696d6-nzlrd" (UID: "d3950d98-83ab-4ad0-b91c-cb838ae61278") : secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.836543 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.845035 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.846730 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2s9dn\" (UniqueName: \"kubernetes.io/projected/86f9260b-f92a-4dc1-9445-8bb3de058537-kube-api-access-2s9dn\") pod \"ironic-operator-controller-manager-958664b5-vcpch\" (UID: \"86f9260b-f92a-4dc1-9445-8bb3de058537\") " pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.846821 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.847944 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.852853 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.876728 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-4qvc7" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.904651 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5ct2\" (UniqueName: \"kubernetes.io/projected/ef13bfff-4e74-42e2-96b7-c9e1dff84e92-kube-api-access-g5ct2\") pod \"nova-operator-controller-manager-ddcbfd695-vffmq\" (UID: \"ef13bfff-4e74-42e2-96b7-c9e1dff84e92\") " pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.904731 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69vfs\" (UniqueName: \"kubernetes.io/projected/d9f17334-8180-44ba-ab9d-28c3a24c56ee-kube-api-access-69vfs\") pod \"manila-operator-controller-manager-765668569f-qcwbm\" (UID: \"d9f17334-8180-44ba-ab9d-28c3a24c56ee\") " pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.904812 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpndt\" (UniqueName: \"kubernetes.io/projected/868c4023-b09a-433a-8a12-ff02629c4ff2-kube-api-access-qpndt\") pod \"neutron-operator-controller-manager-694c5bfc85-njl9c\" (UID: \"868c4023-b09a-433a-8a12-ff02629c4ff2\") " pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.904847 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rz9v\" (UniqueName: \"kubernetes.io/projected/edfcc729-f49c-4831-898c-448d88ae3236-kube-api-access-7rz9v\") pod \"mariadb-operator-controller-manager-67bf948998-zrxc4\" (UID: \"edfcc729-f49c-4831-898c-448d88ae3236\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.919320 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r"] Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.922008 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l5v9\" (UniqueName: \"kubernetes.io/projected/47665907-f1ce-43a1-a3b0-1510bd987a4f-kube-api-access-6l5v9\") pod \"heat-operator-controller-manager-587c6bfdcf-pd5tw\" (UID: \"47665907-f1ce-43a1-a3b0-1510bd987a4f\") " pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.927079 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl5l8\" (UniqueName: \"kubernetes.io/projected/d3950d98-83ab-4ad0-b91c-cb838ae61278-kube-api-access-gl5l8\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.927604 4959 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brqd8\" (UniqueName: \"kubernetes.io/projected/08b2bcb6-f6da-4a40-95c6-c225a4c145fd-kube-api-access-brqd8\") pod \"keystone-operator-controller-manager-7b84b46695-wfxq5\" (UID: \"08b2bcb6-f6da-4a40-95c6-c225a4c145fd\") " pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.928146 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.933145 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2qmb\" (UniqueName: \"kubernetes.io/projected/666e5c47-61d6-4fdf-bbaf-7aae03a06912-kube-api-access-h2qmb\") pod \"horizon-operator-controller-manager-5fb775575f-rcpd5\" (UID: \"666e5c47-61d6-4fdf-bbaf-7aae03a06912\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.938941 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69vfs\" (UniqueName: \"kubernetes.io/projected/d9f17334-8180-44ba-ab9d-28c3a24c56ee-kube-api-access-69vfs\") pod \"manila-operator-controller-manager-765668569f-qcwbm\" (UID: \"d9f17334-8180-44ba-ab9d-28c3a24c56ee\") " pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" Jan 28 15:33:09 crc kubenswrapper[4959]: I0128 15:33:09.939627 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rz9v\" (UniqueName: \"kubernetes.io/projected/edfcc729-f49c-4831-898c-448d88ae3236-kube-api-access-7rz9v\") pod \"mariadb-operator-controller-manager-67bf948998-zrxc4\" (UID: \"edfcc729-f49c-4831-898c-448d88ae3236\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.033637 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.036997 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.043609 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-xk6wh" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.045481 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpndt\" (UniqueName: \"kubernetes.io/projected/868c4023-b09a-433a-8a12-ff02629c4ff2-kube-api-access-qpndt\") pod \"neutron-operator-controller-manager-694c5bfc85-njl9c\" (UID: \"868c4023-b09a-433a-8a12-ff02629c4ff2\") " pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.045718 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5ct2\" (UniqueName: \"kubernetes.io/projected/ef13bfff-4e74-42e2-96b7-c9e1dff84e92-kube-api-access-g5ct2\") pod \"nova-operator-controller-manager-ddcbfd695-vffmq\" (UID: \"ef13bfff-4e74-42e2-96b7-c9e1dff84e92\") " pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.045827 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjnvr\" (UniqueName: \"kubernetes.io/projected/9d84ca34-1566-41d9-a8aa-083d9c405581-kube-api-access-gjnvr\") pod \"octavia-operator-controller-manager-5c765b4558-mls2r\" (UID: \"9d84ca34-1566-41d9-a8aa-083d9c405581\") " pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.057560 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.058635 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.063169 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.064091 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-czkr8" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.065490 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.073212 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.073585 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.074691 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.079080 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-xsgmc" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.079258 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5ct2\" (UniqueName: \"kubernetes.io/projected/ef13bfff-4e74-42e2-96b7-c9e1dff84e92-kube-api-access-g5ct2\") pod \"nova-operator-controller-manager-ddcbfd695-vffmq\" (UID: \"ef13bfff-4e74-42e2-96b7-c9e1dff84e92\") " pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.083475 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpndt\" (UniqueName: \"kubernetes.io/projected/868c4023-b09a-433a-8a12-ff02629c4ff2-kube-api-access-qpndt\") pod \"neutron-operator-controller-manager-694c5bfc85-njl9c\" (UID: \"868c4023-b09a-433a-8a12-ff02629c4ff2\") " pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.090154 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-z872n"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.091624 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.098947 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.103983 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.107360 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.108714 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.109802 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-r4cvx" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.112172 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2gvvb" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.112253 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-z872n"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.118547 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.130658 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.131324 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.132443 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.144094 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.145334 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.145725 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.147159 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjnvr\" (UniqueName: \"kubernetes.io/projected/9d84ca34-1566-41d9-a8aa-083d9c405581-kube-api-access-gjnvr\") pod \"octavia-operator-controller-manager-5c765b4558-mls2r\" (UID: \"9d84ca34-1566-41d9-a8aa-083d9c405581\") " pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.147288 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jq5vh\" (UniqueName: \"kubernetes.io/projected/f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d-kube-api-access-jq5vh\") pod \"ovn-operator-controller-manager-788c46999f-zpcjn\" (UID: \"f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.154216 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-mj98b" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.174010 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.199095 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjnvr\" (UniqueName: \"kubernetes.io/projected/9d84ca34-1566-41d9-a8aa-083d9c405581-kube-api-access-gjnvr\") pod \"octavia-operator-controller-manager-5c765b4558-mls2r\" (UID: \"9d84ca34-1566-41d9-a8aa-083d9c405581\") " pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.199236 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.200403 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.204918 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.210056 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-nlsxv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.214580 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.215267 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.248598 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jq5vh\" (UniqueName: \"kubernetes.io/projected/f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d-kube-api-access-jq5vh\") pod \"ovn-operator-controller-manager-788c46999f-zpcjn\" (UID: \"f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.248661 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tnxh\" (UniqueName: \"kubernetes.io/projected/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-kube-api-access-6tnxh\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.248724 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.248756 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk7j7\" (UniqueName: \"kubernetes.io/projected/3fc1a295-6fb3-43e0-9379-eeb8b63a82a8-kube-api-access-nk7j7\") pod \"placement-operator-controller-manager-5b964cf4cd-tkl2z\" (UID: \"3fc1a295-6fb3-43e0-9379-eeb8b63a82a8\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.248777 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77whx\" (UniqueName: \"kubernetes.io/projected/b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10-kube-api-access-77whx\") pod \"swift-operator-controller-manager-68fc8c869-z872n\" (UID: \"b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.248854 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkcqv\" (UniqueName: 
\"kubernetes.io/projected/7607b89f-4c59-4c4c-9b63-3b25f4d653cf-kube-api-access-fkcqv\") pod \"test-operator-controller-manager-56f8bfcd9f-td678\" (UID: \"7607b89f-4c59-4c4c-9b63-3b25f4d653cf\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.248891 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drs89\" (UniqueName: \"kubernetes.io/projected/f2627922-faf4-4e19-8084-0a10b175546b-kube-api-access-drs89\") pod \"telemetry-operator-controller-manager-6d69b9c5db-xkcll\" (UID: \"f2627922-faf4-4e19-8084-0a10b175546b\") " pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.278645 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jq5vh\" (UniqueName: \"kubernetes.io/projected/f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d-kube-api-access-jq5vh\") pod \"ovn-operator-controller-manager-788c46999f-zpcjn\" (UID: \"f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.352913 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.352962 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk7j7\" (UniqueName: \"kubernetes.io/projected/3fc1a295-6fb3-43e0-9379-eeb8b63a82a8-kube-api-access-nk7j7\") pod \"placement-operator-controller-manager-5b964cf4cd-tkl2z\" (UID: \"3fc1a295-6fb3-43e0-9379-eeb8b63a82a8\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.353002 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77whx\" (UniqueName: \"kubernetes.io/projected/b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10-kube-api-access-77whx\") pod \"swift-operator-controller-manager-68fc8c869-z872n\" (UID: \"b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.353067 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.353102 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkcqv\" (UniqueName: \"kubernetes.io/projected/7607b89f-4c59-4c4c-9b63-3b25f4d653cf-kube-api-access-fkcqv\") pod \"test-operator-controller-manager-56f8bfcd9f-td678\" (UID: \"7607b89f-4c59-4c4c-9b63-3b25f4d653cf\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.353159 4959 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-drs89\" (UniqueName: \"kubernetes.io/projected/f2627922-faf4-4e19-8084-0a10b175546b-kube-api-access-drs89\") pod \"telemetry-operator-controller-manager-6d69b9c5db-xkcll\" (UID: \"f2627922-faf4-4e19-8084-0a10b175546b\") " pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.353185 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tnxh\" (UniqueName: \"kubernetes.io/projected/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-kube-api-access-6tnxh\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.353211 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdzz9\" (UniqueName: \"kubernetes.io/projected/91dbf1cd-c113-4dfb-bb3f-28d35d7994b2-kube-api-access-vdzz9\") pod \"watcher-operator-controller-manager-767b8bc766-rs272\" (UID: \"91dbf1cd-c113-4dfb-bb3f-28d35d7994b2\") " pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.353703 4959 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.353751 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert podName:4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a nodeName:}" failed. No retries permitted until 2026-01-28 15:33:10.853735666 +0000 UTC m=+974.299642049 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" (UID: "4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.354337 4959 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.354364 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert podName:d3950d98-83ab-4ad0-b91c-cb838ae61278 nodeName:}" failed. No retries permitted until 2026-01-28 15:33:11.354355382 +0000 UTC m=+974.800261765 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert") pod "infra-operator-controller-manager-79955696d6-nzlrd" (UID: "d3950d98-83ab-4ad0-b91c-cb838ae61278") : secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.377586 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.378830 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.381290 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk7j7\" (UniqueName: \"kubernetes.io/projected/3fc1a295-6fb3-43e0-9379-eeb8b63a82a8-kube-api-access-nk7j7\") pod \"placement-operator-controller-manager-5b964cf4cd-tkl2z\" (UID: \"3fc1a295-6fb3-43e0-9379-eeb8b63a82a8\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.382312 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tnxh\" (UniqueName: \"kubernetes.io/projected/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-kube-api-access-6tnxh\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.382813 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.383039 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.383261 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-lcvhq" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.384661 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkcqv\" (UniqueName: \"kubernetes.io/projected/7607b89f-4c59-4c4c-9b63-3b25f4d653cf-kube-api-access-fkcqv\") pod \"test-operator-controller-manager-56f8bfcd9f-td678\" (UID: \"7607b89f-4c59-4c4c-9b63-3b25f4d653cf\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.404928 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drs89\" (UniqueName: \"kubernetes.io/projected/f2627922-faf4-4e19-8084-0a10b175546b-kube-api-access-drs89\") pod \"telemetry-operator-controller-manager-6d69b9c5db-xkcll\" (UID: \"f2627922-faf4-4e19-8084-0a10b175546b\") " pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.405085 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77whx\" (UniqueName: \"kubernetes.io/projected/b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10-kube-api-access-77whx\") pod \"swift-operator-controller-manager-68fc8c869-z872n\" (UID: \"b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.436591 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.455766 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdzz9\" (UniqueName: \"kubernetes.io/projected/91dbf1cd-c113-4dfb-bb3f-28d35d7994b2-kube-api-access-vdzz9\") pod \"watcher-operator-controller-manager-767b8bc766-rs272\" (UID: \"91dbf1cd-c113-4dfb-bb3f-28d35d7994b2\") " 
pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.456436 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.472416 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.474680 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.482069 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-clb2v" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.491755 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdzz9\" (UniqueName: \"kubernetes.io/projected/91dbf1cd-c113-4dfb-bb3f-28d35d7994b2-kube-api-access-vdzz9\") pod \"watcher-operator-controller-manager-767b8bc766-rs272\" (UID: \"91dbf1cd-c113-4dfb-bb3f-28d35d7994b2\") " pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.506196 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.513298 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.523329 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.543073 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.552197 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.558965 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.559165 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj55h\" (UniqueName: \"kubernetes.io/projected/d5c6598b-38d8-4598-ac7e-862426f8c0d6-kube-api-access-fj55h\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mcn29\" (UID: \"d5c6598b-38d8-4598-ac7e-862426f8c0d6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.559378 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prjpx\" (UniqueName: \"kubernetes.io/projected/ab83fff6-1016-4052-adf6-13c1ac8b832c-kube-api-access-prjpx\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.559457 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.582010 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.604103 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn"] Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.612528 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.640998 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.652696 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.661032 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prjpx\" (UniqueName: \"kubernetes.io/projected/ab83fff6-1016-4052-adf6-13c1ac8b832c-kube-api-access-prjpx\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.661131 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.661186 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.661277 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fj55h\" (UniqueName: \"kubernetes.io/projected/d5c6598b-38d8-4598-ac7e-862426f8c0d6-kube-api-access-fj55h\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mcn29\" (UID: \"d5c6598b-38d8-4598-ac7e-862426f8c0d6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.661998 4959 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.662060 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:11.162041726 +0000 UTC m=+974.607948109 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "metrics-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.662090 4959 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.662202 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:11.162177651 +0000 UTC m=+974.608084024 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "webhook-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.691543 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fj55h\" (UniqueName: \"kubernetes.io/projected/d5c6598b-38d8-4598-ac7e-862426f8c0d6-kube-api-access-fj55h\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mcn29\" (UID: \"d5c6598b-38d8-4598-ac7e-862426f8c0d6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" Jan 28 15:33:10 crc kubenswrapper[4959]: W0128 15:33:10.696145 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod174adb74_3ce7_4082_932c_4b8c00059fc7.slice/crio-cc7c110d22608823b01cecf95486b6a2c267551ecc83b620b4528b5553dda103 WatchSource:0}: Error finding container cc7c110d22608823b01cecf95486b6a2c267551ecc83b620b4528b5553dda103: Status 404 returned error can't find the container with id cc7c110d22608823b01cecf95486b6a2c267551ecc83b620b4528b5553dda103 Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.696477 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prjpx\" (UniqueName: \"kubernetes.io/projected/ab83fff6-1016-4052-adf6-13c1ac8b832c-kube-api-access-prjpx\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.865253 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.865470 4959 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: E0128 15:33:10.865562 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert podName:4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a nodeName:}" failed. No retries permitted until 2026-01-28 15:33:11.865540703 +0000 UTC m=+975.311447086 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" (UID: "4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:10 crc kubenswrapper[4959]: I0128 15:33:10.875161 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.004418 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-958664b5-vcpch"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.055710 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" event={"ID":"9234aa7e-f345-4186-bec5-e54051a95f1f","Type":"ContainerStarted","Data":"88135f6fde2fd9e2647ab2797a4e416e734c4d8cdf50fbdcdeef882194dc9d72"} Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.072163 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" event={"ID":"174adb74-3ce7-4082-932c-4b8c00059fc7","Type":"ContainerStarted","Data":"cc7c110d22608823b01cecf95486b6a2c267551ecc83b620b4528b5553dda103"} Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.088537 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" event={"ID":"6173ecaa-d123-4b57-8ffd-a1c19295b06f","Type":"ContainerStarted","Data":"7ac4b63933d9fd7497475aa58440ba288d7068b5874ed2e1806370ca6079062b"} Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.131902 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.183951 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.184055 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:11 crc kubenswrapper[4959]: E0128 15:33:11.184288 4959 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 15:33:11 crc kubenswrapper[4959]: E0128 15:33:11.184415 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:12.184387222 +0000 UTC m=+975.630293665 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "webhook-server-cert" not found Jan 28 15:33:11 crc kubenswrapper[4959]: E0128 15:33:11.184423 4959 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 15:33:11 crc kubenswrapper[4959]: E0128 15:33:11.184487 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:12.184469775 +0000 UTC m=+975.630376158 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "metrics-server-cert" not found Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.247911 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.387756 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.388805 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:11 crc kubenswrapper[4959]: E0128 15:33:11.388967 4959 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:11 crc kubenswrapper[4959]: E0128 15:33:11.389031 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert podName:d3950d98-83ab-4ad0-b91c-cb838ae61278 nodeName:}" failed. No retries permitted until 2026-01-28 15:33:13.389013147 +0000 UTC m=+976.834919530 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert") pod "infra-operator-controller-manager-79955696d6-nzlrd" (UID: "d3950d98-83ab-4ad0-b91c-cb838ae61278") : secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.455171 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.545964 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-765668569f-qcwbm"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.570099 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.601205 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.645982 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.801982 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.839086 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.845752 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.902212 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:11 crc kubenswrapper[4959]: E0128 15:33:11.902619 4959 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:11 crc kubenswrapper[4959]: E0128 15:33:11.902697 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert podName:4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a nodeName:}" failed. No retries permitted until 2026-01-28 15:33:13.902677368 +0000 UTC m=+977.348583751 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" (UID: "4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.932924 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-z872n"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.943045 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll"] Jan 28 15:33:11 crc kubenswrapper[4959]: I0128 15:33:11.995046 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29"] Jan 28 15:33:12 crc kubenswrapper[4959]: W0128 15:33:12.001804 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91dbf1cd_c113_4dfb_bb3f_28d35d7994b2.slice/crio-c0685b42902ebe79f20a07f82116a1fe3e933e279e85564ad319b0eda8143d54 WatchSource:0}: Error finding container c0685b42902ebe79f20a07f82116a1fe3e933e279e85564ad319b0eda8143d54: Status 404 returned error can't find the container with id c0685b42902ebe79f20a07f82116a1fe3e933e279e85564ad319b0eda8143d54 Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.003227 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272"] Jan 28 15:33:12 crc kubenswrapper[4959]: W0128 15:33:12.039834 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2627922_faf4_4e19_8084_0a10b175546b.slice/crio-cd601c945cf4d1dffff89fa96b522ef28c44e944a814ea67a457c33749a8b2e0 WatchSource:0}: Error finding container cd601c945cf4d1dffff89fa96b522ef28c44e944a814ea67a457c33749a8b2e0: Status 404 returned error can't find the container with id cd601c945cf4d1dffff89fa96b522ef28c44e944a814ea67a457c33749a8b2e0 Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.042613 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/telemetry-operator@sha256:c9d639f3d01f7a4f139a8b7fb751ca880893f7b9a4e596d6a5304534e46392ba,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-drs89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.043778 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fj55h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-mcn29_openstack-operators(d5c6598b-38d8-4598-ac7e-862426f8c0d6): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.043938 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" podUID="f2627922-faf4-4e19-8084-0a10b175546b"
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.047212 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" podUID="d5c6598b-38d8-4598-ac7e-862426f8c0d6"
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.102855 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" event={"ID":"ef13bfff-4e74-42e2-96b7-c9e1dff84e92","Type":"ContainerStarted","Data":"5626be3bba43b0af438d4e9d7ab119a89557d4721833299e02d5d868e6ee5bd1"}
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.113303 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" event={"ID":"edfcc729-f49c-4831-898c-448d88ae3236","Type":"ContainerStarted","Data":"ce4daa1f48fbc737519bf2ca916df7e1117da300e09a9a585a8252565e7130d3"}
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.118163 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678"]
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.123532 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" event={"ID":"f957e48d-1d55-4d55-8059-693078a8db5f","Type":"ContainerStarted","Data":"f757d42f887ac680231e1bf4a9ade584e63a27d31e08dc98388edfdef274b40f"}
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.127602 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fkcqv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-56f8bfcd9f-td678_openstack-operators(7607b89f-4c59-4c4c-9b63-3b25f4d653cf): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.128153 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" event={"ID":"3fc1a295-6fb3-43e0-9379-eeb8b63a82a8","Type":"ContainerStarted","Data":"a930166cdd8ab692dad16091b42b1ebeaf5789d9adac51ccc008e2a9dac17b4d"}
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.128719 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" podUID="7607b89f-4c59-4c4c-9b63-3b25f4d653cf"
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.142454 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" event={"ID":"868c4023-b09a-433a-8a12-ff02629c4ff2","Type":"ContainerStarted","Data":"6c6fee79c2093a8877c2441dcc6faf0964b6bb7117c15bd912fb53ae685b75f2"}
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.168522 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" event={"ID":"9d84ca34-1566-41d9-a8aa-083d9c405581","Type":"ContainerStarted","Data":"9103c26d766f37f962d5c48b4d799c399ea23c9014123993109a4a1bfe28ff9a"}
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.176792 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" event={"ID":"f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d","Type":"ContainerStarted","Data":"deaffddf8555144693650a6f766e5c5bcd7cf547cb381867d88e7578d8032ba5"}
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.184511 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" event={"ID":"d5c6598b-38d8-4598-ac7e-862426f8c0d6","Type":"ContainerStarted","Data":"23e803aab9df979d8a87bca570f87abede559ccfe2620f757bb259b1a5735dc5"}
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.191190 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" podUID="d5c6598b-38d8-4598-ac7e-862426f8c0d6"
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.193803 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" event={"ID":"91dbf1cd-c113-4dfb-bb3f-28d35d7994b2","Type":"ContainerStarted","Data":"c0685b42902ebe79f20a07f82116a1fe3e933e279e85564ad319b0eda8143d54"}
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.204711 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" event={"ID":"08b2bcb6-f6da-4a40-95c6-c225a4c145fd","Type":"ContainerStarted","Data":"48ffb248d34fc63357922251e4f73fa84f6e0bf2971d3cc782a649be8f4afedc"}
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.214389 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv"
Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.214442 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv"
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.214641 4959 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.214694 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:14.214678389 +0000 UTC m=+977.660584762 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "metrics-server-cert" not found
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.215027 4959 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.215057 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:14.215049649 +0000 UTC m=+977.660956022 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "webhook-server-cert" not found
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "webhook-server-cert" not found Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.224479 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" event={"ID":"47665907-f1ce-43a1-a3b0-1510bd987a4f","Type":"ContainerStarted","Data":"cb7c49e16be8b80199e5682f6520ee180b86e11c8cc0f2940ae3330e5741541d"} Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.227429 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" event={"ID":"f2627922-faf4-4e19-8084-0a10b175546b","Type":"ContainerStarted","Data":"cd601c945cf4d1dffff89fa96b522ef28c44e944a814ea67a457c33749a8b2e0"} Jan 28 15:33:12 crc kubenswrapper[4959]: E0128 15:33:12.232432 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/telemetry-operator@sha256:c9d639f3d01f7a4f139a8b7fb751ca880893f7b9a4e596d6a5304534e46392ba\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" podUID="f2627922-faf4-4e19-8084-0a10b175546b" Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.242752 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" event={"ID":"b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10","Type":"ContainerStarted","Data":"a45c9f0e1cf2672b986b663e35bdef18d3c07bf6ea4039f62c0bcf7fd3ffe439"} Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.268420 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" event={"ID":"d9f17334-8180-44ba-ab9d-28c3a24c56ee","Type":"ContainerStarted","Data":"837aeeefdd2f1c50812c5239b5bd595899572c7a85540e45252ad7515ebe66c8"} Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.270838 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" event={"ID":"666e5c47-61d6-4fdf-bbaf-7aae03a06912","Type":"ContainerStarted","Data":"b574639ed4ed29aa215e5e34c882cd15d3bd53156376f71797620a6f22bdf5de"} Jan 28 15:33:12 crc kubenswrapper[4959]: I0128 15:33:12.308494 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" event={"ID":"86f9260b-f92a-4dc1-9445-8bb3de058537","Type":"ContainerStarted","Data":"3f17c747c24285ece5d6b7f581d6c3bff2c60e949993fc50cf264b103aecebf5"} Jan 28 15:33:13 crc kubenswrapper[4959]: I0128 15:33:13.333138 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" event={"ID":"7607b89f-4c59-4c4c-9b63-3b25f4d653cf","Type":"ContainerStarted","Data":"20345bc257f00f2fd1f356a0a541474d8cac1cd8b9fb89d03c1bd73f4e5854ff"} Jan 28 15:33:13 crc kubenswrapper[4959]: E0128 15:33:13.336431 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/telemetry-operator@sha256:c9d639f3d01f7a4f139a8b7fb751ca880893f7b9a4e596d6a5304534e46392ba\\\"\"" 
pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" podUID="f2627922-faf4-4e19-8084-0a10b175546b" Jan 28 15:33:13 crc kubenswrapper[4959]: E0128 15:33:13.336833 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" podUID="d5c6598b-38d8-4598-ac7e-862426f8c0d6" Jan 28 15:33:13 crc kubenswrapper[4959]: E0128 15:33:13.336903 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" podUID="7607b89f-4c59-4c4c-9b63-3b25f4d653cf" Jan 28 15:33:13 crc kubenswrapper[4959]: I0128 15:33:13.447829 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:13 crc kubenswrapper[4959]: E0128 15:33:13.448726 4959 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:13 crc kubenswrapper[4959]: E0128 15:33:13.448799 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert podName:d3950d98-83ab-4ad0-b91c-cb838ae61278 nodeName:}" failed. No retries permitted until 2026-01-28 15:33:17.448775101 +0000 UTC m=+980.894681484 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert") pod "infra-operator-controller-manager-79955696d6-nzlrd" (UID: "d3950d98-83ab-4ad0-b91c-cb838ae61278") : secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:13 crc kubenswrapper[4959]: I0128 15:33:13.959075 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:13 crc kubenswrapper[4959]: E0128 15:33:13.959362 4959 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:13 crc kubenswrapper[4959]: E0128 15:33:13.959435 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert podName:4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a nodeName:}" failed. No retries permitted until 2026-01-28 15:33:17.959409078 +0000 UTC m=+981.405315461 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" (UID: "4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:14 crc kubenswrapper[4959]: I0128 15:33:14.315059 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:14 crc kubenswrapper[4959]: I0128 15:33:14.315155 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:14 crc kubenswrapper[4959]: E0128 15:33:14.315384 4959 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 15:33:14 crc kubenswrapper[4959]: E0128 15:33:14.315630 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:18.315605744 +0000 UTC m=+981.761512137 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "metrics-server-cert" not found Jan 28 15:33:14 crc kubenswrapper[4959]: E0128 15:33:14.317265 4959 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 15:33:14 crc kubenswrapper[4959]: E0128 15:33:14.317338 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:18.317326976 +0000 UTC m=+981.763233359 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "webhook-server-cert" not found Jan 28 15:33:14 crc kubenswrapper[4959]: E0128 15:33:14.466656 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" podUID="7607b89f-4c59-4c4c-9b63-3b25f4d653cf" Jan 28 15:33:17 crc kubenswrapper[4959]: I0128 15:33:17.541808 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:17 crc kubenswrapper[4959]: E0128 15:33:17.541982 4959 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:17 crc kubenswrapper[4959]: E0128 15:33:17.542522 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert podName:d3950d98-83ab-4ad0-b91c-cb838ae61278 nodeName:}" failed. No retries permitted until 2026-01-28 15:33:25.542494376 +0000 UTC m=+988.988400759 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert") pod "infra-operator-controller-manager-79955696d6-nzlrd" (UID: "d3950d98-83ab-4ad0-b91c-cb838ae61278") : secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:18 crc kubenswrapper[4959]: I0128 15:33:18.058137 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:18 crc kubenswrapper[4959]: E0128 15:33:18.058824 4959 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:18 crc kubenswrapper[4959]: E0128 15:33:18.058886 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert podName:4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a nodeName:}" failed. No retries permitted until 2026-01-28 15:33:26.058869494 +0000 UTC m=+989.504775877 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" (UID: "4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:18 crc kubenswrapper[4959]: I0128 15:33:18.363366 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:18 crc kubenswrapper[4959]: I0128 15:33:18.363460 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:18 crc kubenswrapper[4959]: E0128 15:33:18.363618 4959 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 15:33:18 crc kubenswrapper[4959]: E0128 15:33:18.363747 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:26.363714809 +0000 UTC m=+989.809621352 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "webhook-server-cert" not found Jan 28 15:33:18 crc kubenswrapper[4959]: E0128 15:33:18.363760 4959 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 15:33:18 crc kubenswrapper[4959]: E0128 15:33:18.363887 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:26.363857012 +0000 UTC m=+989.809763565 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "metrics-server-cert" not found Jan 28 15:33:25 crc kubenswrapper[4959]: I0128 15:33:25.617899 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:25 crc kubenswrapper[4959]: E0128 15:33:25.618172 4959 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:25 crc kubenswrapper[4959]: E0128 15:33:25.618933 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert podName:d3950d98-83ab-4ad0-b91c-cb838ae61278 nodeName:}" failed. No retries permitted until 2026-01-28 15:33:41.618898949 +0000 UTC m=+1005.064805332 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert") pod "infra-operator-controller-manager-79955696d6-nzlrd" (UID: "d3950d98-83ab-4ad0-b91c-cb838ae61278") : secret "infra-operator-webhook-server-cert" not found Jan 28 15:33:26 crc kubenswrapper[4959]: I0128 15:33:26.128275 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:26 crc kubenswrapper[4959]: E0128 15:33:26.128962 4959 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:26 crc kubenswrapper[4959]: E0128 15:33:26.129066 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert podName:4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a nodeName:}" failed. No retries permitted until 2026-01-28 15:33:42.129041964 +0000 UTC m=+1005.574948337 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" (UID: "4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 15:33:26 crc kubenswrapper[4959]: I0128 15:33:26.434451 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:26 crc kubenswrapper[4959]: I0128 15:33:26.434528 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:26 crc kubenswrapper[4959]: E0128 15:33:26.434705 4959 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 15:33:26 crc kubenswrapper[4959]: E0128 15:33:26.434823 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:42.434797392 +0000 UTC m=+1005.880703765 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "webhook-server-cert" not found Jan 28 15:33:26 crc kubenswrapper[4959]: E0128 15:33:26.434728 4959 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 15:33:26 crc kubenswrapper[4959]: E0128 15:33:26.434935 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs podName:ab83fff6-1016-4052-adf6-13c1ac8b832c nodeName:}" failed. No retries permitted until 2026-01-28 15:33:42.434905155 +0000 UTC m=+1005.880811718 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs") pod "openstack-operator-controller-manager-57d89bf95c-wkcdv" (UID: "ab83fff6-1016-4052-adf6-13c1ac8b832c") : secret "metrics-server-cert" not found Jan 28 15:33:28 crc kubenswrapper[4959]: E0128 15:33:28.499179 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/barbican-operator@sha256:eae1fc0ecdfc4f0bef5a980affa60155a5baacf1bdaaeeb18d9c2680f762bc9d" Jan 28 15:33:28 crc kubenswrapper[4959]: E0128 15:33:28.500184 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/barbican-operator@sha256:eae1fc0ecdfc4f0bef5a980affa60155a5baacf1bdaaeeb18d9c2680f762bc9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4tzpg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-6bc7f4f4cf-kz6n7_openstack-operators(174adb74-3ce7-4082-932c-4b8c00059fc7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:28 crc kubenswrapper[4959]: E0128 15:33:28.501453 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" 
podUID="174adb74-3ce7-4082-932c-4b8c00059fc7" Jan 28 15:33:28 crc kubenswrapper[4959]: E0128 15:33:28.607072 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/barbican-operator@sha256:eae1fc0ecdfc4f0bef5a980affa60155a5baacf1bdaaeeb18d9c2680f762bc9d\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" podUID="174adb74-3ce7-4082-932c-4b8c00059fc7" Jan 28 15:33:28 crc kubenswrapper[4959]: I0128 15:33:28.689951 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:33:28 crc kubenswrapper[4959]: I0128 15:33:28.690036 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:33:31 crc kubenswrapper[4959]: E0128 15:33:31.571013 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8" Jan 28 15:33:31 crc kubenswrapper[4959]: E0128 15:33:31.571774 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h2qmb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-5fb775575f-rcpd5_openstack-operators(666e5c47-61d6-4fdf-bbaf-7aae03a06912): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:31 crc kubenswrapper[4959]: E0128 15:33:31.573828 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" podUID="666e5c47-61d6-4fdf-bbaf-7aae03a06912" Jan 28 15:33:31 crc kubenswrapper[4959]: E0128 15:33:31.649416 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" podUID="666e5c47-61d6-4fdf-bbaf-7aae03a06912" Jan 28 15:33:32 crc kubenswrapper[4959]: E0128 15:33:32.844025 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/manila-operator@sha256:2e1a77365c3b08ff39892565abfc72b72e969f623e58a2663fb93890371fc9da" Jan 28 15:33:32 crc kubenswrapper[4959]: E0128 15:33:32.844284 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/manila-operator@sha256:2e1a77365c3b08ff39892565abfc72b72e969f623e58a2663fb93890371fc9da,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-69vfs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
Jan 28 15:33:32 crc kubenswrapper[4959]: E0128 15:33:32.845555 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" podUID="d9f17334-8180-44ba-ab9d-28c3a24c56ee"
Jan 28 15:33:33 crc kubenswrapper[4959]: E0128 15:33:33.660409 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/manila-operator@sha256:2e1a77365c3b08ff39892565abfc72b72e969f623e58a2663fb93890371fc9da\\\"\"" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" podUID="d9f17334-8180-44ba-ab9d-28c3a24c56ee"
Jan 28 15:33:33 crc kubenswrapper[4959]: E0128 15:33:33.839034 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf"
Jan 28 15:33:33 crc kubenswrapper[4959]: E0128 15:33:33.839331 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7rz9v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-67bf948998-zrxc4_openstack-operators(edfcc729-f49c-4831-898c-448d88ae3236): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 15:33:33 crc kubenswrapper[4959]: E0128 15:33:33.840553 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" podUID="edfcc729-f49c-4831-898c-448d88ae3236"
Jan 28 15:33:34 crc kubenswrapper[4959]: E0128 15:33:34.670020 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:2d493137559b74e23edb4788b7fbdb38b3e239df0f2d7e6e540e50b2355fc3cf\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" podUID="edfcc729-f49c-4831-898c-448d88ae3236"
Jan 28 15:33:36 crc kubenswrapper[4959]: E0128 15:33:36.150787 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/octavia-operator@sha256:c7804813a3bba8910a47a5f32bd528335e18397f93cf5f7e7181d3d2c209b59b"
Jan 28 15:33:36 crc kubenswrapper[4959]: E0128 15:33:36.151059 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/octavia-operator@sha256:c7804813a3bba8910a47a5f32bd528335e18397f93cf5f7e7181d3d2c209b59b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gjnvr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-5c765b4558-mls2r_openstack-operators(9d84ca34-1566-41d9-a8aa-083d9c405581): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 15:33:36 crc kubenswrapper[4959]: E0128 15:33:36.152299 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" podUID="9d84ca34-1566-41d9-a8aa-083d9c405581"
Jan 28 15:33:36 crc kubenswrapper[4959]: E0128 15:33:36.695551 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/octavia-operator@sha256:c7804813a3bba8910a47a5f32bd528335e18397f93cf5f7e7181d3d2c209b59b\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" podUID="9d84ca34-1566-41d9-a8aa-083d9c405581"
Jan 28 15:33:37 crc kubenswrapper[4959]: E0128 15:33:37.038256 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/ironic-operator@sha256:5f48b6af05a584d3da5c973f83195d999cc151aa0f187cabc8002cb46d60afe5"
Jan 28 15:33:37 crc kubenswrapper[4959]: E0128 15:33:37.038498 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/ironic-operator@sha256:5f48b6af05a584d3da5c973f83195d999cc151aa0f187cabc8002cb46d60afe5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2s9dn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-958664b5-vcpch_openstack-operators(86f9260b-f92a-4dc1-9445-8bb3de058537): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
&Container{Name:manager,Image:quay.io/lmiccini/ironic-operator@sha256:5f48b6af05a584d3da5c973f83195d999cc151aa0f187cabc8002cb46d60afe5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2s9dn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-958664b5-vcpch_openstack-operators(86f9260b-f92a-4dc1-9445-8bb3de058537): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:37 crc kubenswrapper[4959]: E0128 15:33:37.039738 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" podUID="86f9260b-f92a-4dc1-9445-8bb3de058537" Jan 28 15:33:37 crc kubenswrapper[4959]: E0128 15:33:37.701707 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/ironic-operator@sha256:5f48b6af05a584d3da5c973f83195d999cc151aa0f187cabc8002cb46d60afe5\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" podUID="86f9260b-f92a-4dc1-9445-8bb3de058537" Jan 28 15:33:37 crc kubenswrapper[4959]: E0128 15:33:37.726963 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488" Jan 28 15:33:37 crc kubenswrapper[4959]: E0128 15:33:37.727278 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nk7j7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5b964cf4cd-tkl2z_openstack-operators(3fc1a295-6fb3-43e0-9379-eeb8b63a82a8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:37 crc kubenswrapper[4959]: E0128 15:33:37.728718 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" podUID="3fc1a295-6fb3-43e0-9379-eeb8b63a82a8" Jan 28 15:33:38 crc kubenswrapper[4959]: E0128 15:33:38.438269 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/watcher-operator@sha256:35f1eb96f42069bb8f7c33942fb86b41843ba02803464245c16192ccda3d50e4" Jan 28 15:33:38 crc kubenswrapper[4959]: E0128 15:33:38.438589 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/lmiccini/watcher-operator@sha256:35f1eb96f42069bb8f7c33942fb86b41843ba02803464245c16192ccda3d50e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vdzz9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-767b8bc766-rs272_openstack-operators(91dbf1cd-c113-4dfb-bb3f-28d35d7994b2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:38 crc kubenswrapper[4959]: E0128 15:33:38.440316 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" podUID="91dbf1cd-c113-4dfb-bb3f-28d35d7994b2" Jan 28 15:33:38 crc kubenswrapper[4959]: E0128 15:33:38.709183 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" podUID="3fc1a295-6fb3-43e0-9379-eeb8b63a82a8" Jan 28 15:33:38 crc kubenswrapper[4959]: E0128 15:33:38.712382 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/lmiccini/watcher-operator@sha256:35f1eb96f42069bb8f7c33942fb86b41843ba02803464245c16192ccda3d50e4\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" podUID="91dbf1cd-c113-4dfb-bb3f-28d35d7994b2" Jan 28 15:33:39 crc kubenswrapper[4959]: E0128 15:33:39.260003 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/heat-operator@sha256:429171b44a24e9e4dde46465d90a272d93b15317ea386184d6ad077cc119d3c9" Jan 28 15:33:39 crc kubenswrapper[4959]: E0128 15:33:39.260268 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/heat-operator@sha256:429171b44a24e9e4dde46465d90a272d93b15317ea386184d6ad077cc119d3c9,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6l5v9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-587c6bfdcf-pd5tw_openstack-operators(47665907-f1ce-43a1-a3b0-1510bd987a4f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:39 crc kubenswrapper[4959]: E0128 15:33:39.261500 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" podUID="47665907-f1ce-43a1-a3b0-1510bd987a4f" Jan 28 15:33:39 crc kubenswrapper[4959]: E0128 
Jan 28 15:33:40 crc kubenswrapper[4959]: E0128 15:33:40.063957 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4"
Jan 28 15:33:40 crc kubenswrapper[4959]: E0128 15:33:40.064657 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jq5vh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-788c46999f-zpcjn_openstack-operators(f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 15:33:40 crc kubenswrapper[4959]: E0128 15:33:40.065861 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" podUID="f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d"
Jan 28 15:33:40 crc kubenswrapper[4959]: E0128 15:33:40.721898 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" podUID="f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d"
Jan 28 15:33:40 crc kubenswrapper[4959]: E0128 15:33:40.892186 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382"
Jan 28 15:33:40 crc kubenswrapper[4959]: E0128 15:33:40.892442 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-77whx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-68fc8c869-z872n_openstack-operators(b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 15:33:40 crc kubenswrapper[4959]: E0128 15:33:40.893682 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" podUID="b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10"
Jan 28 15:33:41 crc kubenswrapper[4959]: E0128 15:33:41.624813 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/nova-operator@sha256:a992613466db3478a00c20c28639c4a12f6326aa52c40a418d1ec40038c83b61"
Jan 28 15:33:41 crc kubenswrapper[4959]: E0128 15:33:41.625077 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/nova-operator@sha256:a992613466db3478a00c20c28639c4a12f6326aa52c40a418d1ec40038c83b61,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g5ct2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-ddcbfd695-vffmq_openstack-operators(ef13bfff-4e74-42e2-96b7-c9e1dff84e92): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 15:33:41 crc kubenswrapper[4959]: E0128 15:33:41.626432 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" podUID="ef13bfff-4e74-42e2-96b7-c9e1dff84e92"
Jan 28 15:33:41 crc kubenswrapper[4959]: I0128 15:33:41.643883 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd"
Jan 28 15:33:41 crc kubenswrapper[4959]: I0128 15:33:41.652424 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3950d98-83ab-4ad0-b91c-cb838ae61278-cert\") pod \"infra-operator-controller-manager-79955696d6-nzlrd\" (UID: \"d3950d98-83ab-4ad0-b91c-cb838ae61278\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd"
Jan 28 15:33:41 crc kubenswrapper[4959]: E0128 15:33:41.730351 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382\\\"\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" podUID="b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10"
Jan 28 15:33:41 crc kubenswrapper[4959]: E0128 15:33:41.730375 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/nova-operator@sha256:a992613466db3478a00c20c28639c4a12f6326aa52c40a418d1ec40038c83b61\\\"\"" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" podUID="ef13bfff-4e74-42e2-96b7-c9e1dff84e92"
Jan 28 15:33:41 crc kubenswrapper[4959]: I0128 15:33:41.883758 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd"
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:33:42 crc kubenswrapper[4959]: I0128 15:33:42.153776 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:42 crc kubenswrapper[4959]: I0128 15:33:42.166624 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8\" (UID: \"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:42 crc kubenswrapper[4959]: E0128 15:33:42.306643 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/telemetry-operator@sha256:c9d639f3d01f7a4f139a8b7fb751ca880893f7b9a4e596d6a5304534e46392ba" Jan 28 15:33:42 crc kubenswrapper[4959]: E0128 15:33:42.306860 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/telemetry-operator@sha256:c9d639f3d01f7a4f139a8b7fb751ca880893f7b9a4e596d6a5304534e46392ba,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-drs89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-6d69b9c5db-xkcll_openstack-operators(f2627922-faf4-4e19-8084-0a10b175546b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:42 crc kubenswrapper[4959]: E0128 15:33:42.308318 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" podUID="f2627922-faf4-4e19-8084-0a10b175546b" Jan 28 15:33:42 crc kubenswrapper[4959]: I0128 15:33:42.363532 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:33:42 crc kubenswrapper[4959]: I0128 15:33:42.458422 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:42 crc kubenswrapper[4959]: I0128 15:33:42.458539 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:42 crc kubenswrapper[4959]: I0128 15:33:42.465089 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-webhook-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:42 crc kubenswrapper[4959]: I0128 15:33:42.465247 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ab83fff6-1016-4052-adf6-13c1ac8b832c-metrics-certs\") pod \"openstack-operator-controller-manager-57d89bf95c-wkcdv\" (UID: \"ab83fff6-1016-4052-adf6-13c1ac8b832c\") " pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:42 crc kubenswrapper[4959]: I0128 15:33:42.659934 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:43 crc kubenswrapper[4959]: I0128 15:33:43.588816 4959 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 15:33:45 crc kubenswrapper[4959]: E0128 15:33:45.213516 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/keystone-operator@sha256:f832a7a2326f1b84e7963fdea324e2a5285d636b366f059465c98299ae2d2d63" Jan 28 15:33:45 crc kubenswrapper[4959]: E0128 15:33:45.214349 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/keystone-operator@sha256:f832a7a2326f1b84e7963fdea324e2a5285d636b366f059465c98299ae2d2d63,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-brqd8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b84b46695-wfxq5_openstack-operators(08b2bcb6-f6da-4a40-95c6-c225a4c145fd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:45 crc kubenswrapper[4959]: E0128 15:33:45.215636 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" podUID="08b2bcb6-f6da-4a40-95c6-c225a4c145fd" Jan 28 
15:33:45 crc kubenswrapper[4959]: E0128 15:33:45.764536 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/keystone-operator@sha256:f832a7a2326f1b84e7963fdea324e2a5285d636b366f059465c98299ae2d2d63\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" podUID="08b2bcb6-f6da-4a40-95c6-c225a4c145fd" Jan 28 15:33:45 crc kubenswrapper[4959]: E0128 15:33:45.815935 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 28 15:33:45 crc kubenswrapper[4959]: E0128 15:33:45.816514 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fj55h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-mcn29_openstack-operators(d5c6598b-38d8-4598-ac7e-862426f8c0d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:33:45 crc kubenswrapper[4959]: E0128 15:33:45.818011 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" podUID="d5c6598b-38d8-4598-ac7e-862426f8c0d6" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.241970 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd"] Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.267883 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv"] Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.339408 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8"] Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.771525 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" event={"ID":"7607b89f-4c59-4c4c-9b63-3b25f4d653cf","Type":"ContainerStarted","Data":"06221d410cb82a96cba0279ad59b8810482dfc09025df8c6843095cab52d1ad1"} Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.772989 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.777958 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" event={"ID":"6173ecaa-d123-4b57-8ffd-a1c19295b06f","Type":"ContainerStarted","Data":"5896ead941f4189cefc16ca61fb0c862820e4062b7f25ec4fc513d3e951041a0"} Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.778762 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.785436 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" event={"ID":"d3950d98-83ab-4ad0-b91c-cb838ae61278","Type":"ContainerStarted","Data":"95beee69deb58f3c998034f0b06a8be1d7fe76ac4c195de49f131d1c189b042d"} Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.787341 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" event={"ID":"f957e48d-1d55-4d55-8059-693078a8db5f","Type":"ContainerStarted","Data":"c4905367795931b0c35552609633e5f7fc7ea34fef1a3510247794a18d4f8865"} Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.787527 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.789040 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" event={"ID":"868c4023-b09a-433a-8a12-ff02629c4ff2","Type":"ContainerStarted","Data":"de6c7b7b58ae2a0e83de21759e6d5f4239aa36f5c8d42b9c56a773e08cfd5b5c"} Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.790011 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.791410 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" event={"ID":"9234aa7e-f345-4186-bec5-e54051a95f1f","Type":"ContainerStarted","Data":"44c6ef493830ace7cde51c12bf500f3f7c85252205495fc6ab7c4f4ac94a6475"} Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.791783 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.803274 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" podStartSLOduration=4.278368257 podStartE2EDuration="37.801099864s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:12.127418647 +0000 UTC m=+975.573325030" lastFinishedPulling="2026-01-28 15:33:45.650150254 +0000 UTC m=+1009.096056637" observedRunningTime="2026-01-28 15:33:46.79722789 +0000 UTC m=+1010.243134293" watchObservedRunningTime="2026-01-28 15:33:46.801099864 +0000 UTC m=+1010.247006247" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.817003 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" podStartSLOduration=4.756513818 podStartE2EDuration="37.816984185s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:10.705137235 +0000 UTC m=+974.151043628" lastFinishedPulling="2026-01-28 15:33:43.765607622 +0000 UTC m=+1007.211513995" observedRunningTime="2026-01-28 15:33:46.813699214 +0000 UTC m=+1010.259605597" watchObservedRunningTime="2026-01-28 15:33:46.816984185 +0000 UTC m=+1010.262890558" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.833880 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" podStartSLOduration=4.586170544 podStartE2EDuration="37.833859979s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:10.518896582 +0000 UTC m=+973.964802965" lastFinishedPulling="2026-01-28 15:33:43.766586007 +0000 UTC m=+1007.212492400" observedRunningTime="2026-01-28 15:33:46.83186599 +0000 UTC m=+1010.277772373" watchObservedRunningTime="2026-01-28 15:33:46.833859979 +0000 UTC m=+1010.279766362" Jan 28 15:33:46 crc kubenswrapper[4959]: I0128 15:33:46.854153 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" podStartSLOduration=4.410393287 podStartE2EDuration="37.854127106s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.595547027 +0000 UTC m=+975.041453410" lastFinishedPulling="2026-01-28 15:33:45.039280846 +0000 UTC m=+1008.485187229" observedRunningTime="2026-01-28 15:33:46.847930675 +0000 UTC m=+1010.293837068" watchObservedRunningTime="2026-01-28 15:33:46.854127106 +0000 UTC m=+1010.300033489" Jan 28 15:33:47 crc kubenswrapper[4959]: I0128 15:33:47.609076 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" podStartSLOduration=6.284694489 podStartE2EDuration="38.609039602s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.442035929 +0000 UTC m=+974.887942312" lastFinishedPulling="2026-01-28 15:33:43.766381042 +0000 UTC m=+1007.212287425" observedRunningTime="2026-01-28 15:33:46.886881021 +0000 UTC m=+1010.332787414" watchObservedRunningTime="2026-01-28 15:33:47.609039602 +0000 UTC m=+1011.054945985" Jan 28 15:33:47 crc kubenswrapper[4959]: I0128 15:33:47.800772 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" event={"ID":"ab83fff6-1016-4052-adf6-13c1ac8b832c","Type":"ContainerStarted","Data":"a70f743b92aacbeabbf69989dcb033a49cce3d4ac34ba743e37f914c20ab65b8"} Jan 28 15:33:47 crc kubenswrapper[4959]: I0128 15:33:47.802338 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" event={"ID":"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a","Type":"ContainerStarted","Data":"cb578511d28208dd748757ca8692c9d9b06f1a11675f9271c60f2071d33a7bc8"} Jan 28 15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.822696 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" event={"ID":"174adb74-3ce7-4082-932c-4b8c00059fc7","Type":"ContainerStarted","Data":"b97c7d5b8b66b5d59f74ddf839635e53b8837a97aa1cee6bdd4e6365a2067063"} Jan 28 15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.823436 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" Jan 28 15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.825471 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" event={"ID":"666e5c47-61d6-4fdf-bbaf-7aae03a06912","Type":"ContainerStarted","Data":"efde6d8974a8c284a33e6949ae88019f6d31fda80fbbd17546b7036fedfd2668"} Jan 28 15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.825609 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" Jan 28 15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.838773 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" event={"ID":"ab83fff6-1016-4052-adf6-13c1ac8b832c","Type":"ContainerStarted","Data":"c214dc4130e7a5baf04f681147ea3f25153552da8f53d261d7c64961a76fa5d5"} Jan 28 15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.839387 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.875559 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" podStartSLOduration=3.08236386 podStartE2EDuration="39.875529589s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:10.750812966 +0000 UTC m=+974.196719349" lastFinishedPulling="2026-01-28 15:33:47.543978695 +0000 UTC m=+1010.989885078" observedRunningTime="2026-01-28 15:33:48.844170769 +0000 UTC m=+1012.290077162" watchObservedRunningTime="2026-01-28 15:33:48.875529589 +0000 UTC m=+1012.321435972" Jan 28 15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.935957 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" podStartSLOduration=38.935933352 podStartE2EDuration="38.935933352s" podCreationTimestamp="2026-01-28 15:33:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:33:48.932421986 +0000 UTC m=+1012.378328379" watchObservedRunningTime="2026-01-28 15:33:48.935933352 +0000 UTC m=+1012.381839735" Jan 28 
15:33:48 crc kubenswrapper[4959]: I0128 15:33:48.986643 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" podStartSLOduration=4.2724109 podStartE2EDuration="39.986611626s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.827172135 +0000 UTC m=+975.273078528" lastFinishedPulling="2026-01-28 15:33:47.541372871 +0000 UTC m=+1010.987279254" observedRunningTime="2026-01-28 15:33:48.969738972 +0000 UTC m=+1012.415645365" watchObservedRunningTime="2026-01-28 15:33:48.986611626 +0000 UTC m=+1012.432518009" Jan 28 15:33:52 crc kubenswrapper[4959]: I0128 15:33:52.668289 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-57d89bf95c-wkcdv" Jan 28 15:33:54 crc kubenswrapper[4959]: E0128 15:33:54.591029 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/telemetry-operator@sha256:c9d639f3d01f7a4f139a8b7fb751ca880893f7b9a4e596d6a5304534e46392ba\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" podUID="f2627922-faf4-4e19-8084-0a10b175546b" Jan 28 15:33:57 crc kubenswrapper[4959]: E0128 15:33:57.589605 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" podUID="d5c6598b-38d8-4598-ac7e-862426f8c0d6" Jan 28 15:33:58 crc kubenswrapper[4959]: I0128 15:33:58.689394 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:33:58 crc kubenswrapper[4959]: I0128 15:33:58.689465 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:33:58 crc kubenswrapper[4959]: I0128 15:33:58.689523 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:33:58 crc kubenswrapper[4959]: I0128 15:33:58.690232 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e575dc9c25dda36f1b0b8c84111a641d3564e8c98ab1a5fe36fe70b774dfdfc"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:33:58 crc kubenswrapper[4959]: I0128 15:33:58.690290 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" 
containerID="cri-o://8e575dc9c25dda36f1b0b8c84111a641d3564e8c98ab1a5fe36fe70b774dfdfc" gracePeriod=600 Jan 28 15:33:59 crc kubenswrapper[4959]: I0128 15:33:59.678347 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-k75pz" Jan 28 15:33:59 crc kubenswrapper[4959]: I0128 15:33:59.691845 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-kz6n7" Jan 28 15:33:59 crc kubenswrapper[4959]: I0128 15:33:59.740867 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-zm9pn" Jan 28 15:33:59 crc kubenswrapper[4959]: I0128 15:33:59.850777 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-d6t5g" Jan 28 15:34:00 crc kubenswrapper[4959]: I0128 15:34:00.219686 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-njl9c" Jan 28 15:34:00 crc kubenswrapper[4959]: I0128 15:34:00.220339 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-rcpd5" Jan 28 15:34:00 crc kubenswrapper[4959]: I0128 15:34:00.645567 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-td678" Jan 28 15:34:03 crc kubenswrapper[4959]: I0128 15:34:03.965924 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="8e575dc9c25dda36f1b0b8c84111a641d3564e8c98ab1a5fe36fe70b774dfdfc" exitCode=0 Jan 28 15:34:03 crc kubenswrapper[4959]: I0128 15:34:03.966056 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"8e575dc9c25dda36f1b0b8c84111a641d3564e8c98ab1a5fe36fe70b774dfdfc"} Jan 28 15:34:03 crc kubenswrapper[4959]: I0128 15:34:03.966514 4959 scope.go:117] "RemoveContainer" containerID="5190babcd72aa8d406516ffe324e9eaebbd4bede0a5bd30239f36eb74204fed6" Jan 28 15:34:04 crc kubenswrapper[4959]: E0128 15:34:04.485183 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/heat-operator@sha256:429171b44a24e9e4dde46465d90a272d93b15317ea386184d6ad077cc119d3c9" Jan 28 15:34:04 crc kubenswrapper[4959]: E0128 15:34:04.485514 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/heat-operator@sha256:429171b44a24e9e4dde46465d90a272d93b15317ea386184d6ad077cc119d3c9,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m 
DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6l5v9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-587c6bfdcf-pd5tw_openstack-operators(47665907-f1ce-43a1-a3b0-1510bd987a4f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:34:04 crc kubenswrapper[4959]: E0128 15:34:04.486764 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" podUID="47665907-f1ce-43a1-a3b0-1510bd987a4f" Jan 28 15:34:04 crc kubenswrapper[4959]: E0128 15:34:04.492065 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:89f6fd332fabefd2fff5619432986b37c1c6d197dd1c510f21dfe4609939b8a6" Jan 28 15:34:04 crc kubenswrapper[4959]: E0128 15:34:04.492630 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:89f6fd332fabefd2fff5619432986b37c1c6d197dd1c510f21dfe4609939b8a6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_I
MAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELAT
ED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-an
telope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMA
GE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6tnxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8_openstack-operators(4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:34:04 crc kubenswrapper[4959]: E0128 15:34:04.493897 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" podUID="4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a" Jan 28 15:34:08 crc kubenswrapper[4959]: E0128 15:34:08.077346 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:89f6fd332fabefd2fff5619432986b37c1c6d197dd1c510f21dfe4609939b8a6\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" podUID="4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a" Jan 28 15:34:08 crc kubenswrapper[4959]: E0128 15:34:08.138420 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/ironic-operator@sha256:5f48b6af05a584d3da5c973f83195d999cc151aa0f187cabc8002cb46d60afe5" Jan 28 15:34:08 crc kubenswrapper[4959]: E0128 15:34:08.138635 4959 kuberuntime_manager.go:1274] "Unhandled Error" 
err="container &Container{Name:manager,Image:quay.io/lmiccini/ironic-operator@sha256:5f48b6af05a584d3da5c973f83195d999cc151aa0f187cabc8002cb46d60afe5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2s9dn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-958664b5-vcpch_openstack-operators(86f9260b-f92a-4dc1-9445-8bb3de058537): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:34:08 crc kubenswrapper[4959]: E0128 15:34:08.139882 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" podUID="86f9260b-f92a-4dc1-9445-8bb3de058537" Jan 28 15:34:09 crc kubenswrapper[4959]: I0128 15:34:09.005897 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" event={"ID":"edfcc729-f49c-4831-898c-448d88ae3236","Type":"ContainerStarted","Data":"3543bc682afabf954fb266720a32583e4a9a530fbc96412136d32367f1a459a7"} Jan 28 15:34:09 crc kubenswrapper[4959]: I0128 15:34:09.007171 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" Jan 28 15:34:09 crc kubenswrapper[4959]: I0128 15:34:09.011536 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"b44c8493f2114ae61783ae8e4da030e783bddb8b9595a72c10f90d99f7517fe3"} Jan 28 15:34:09 crc kubenswrapper[4959]: I0128 15:34:09.019483 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" event={"ID":"3fc1a295-6fb3-43e0-9379-eeb8b63a82a8","Type":"ContainerStarted","Data":"b1f976556dd092300723e89fc8280e163dbc565474c44778d0016c5a64057cff"} Jan 28 15:34:09 crc kubenswrapper[4959]: I0128 15:34:09.019707 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" Jan 28 15:34:09 crc kubenswrapper[4959]: I0128 15:34:09.026914 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4" podStartSLOduration=17.409970141 podStartE2EDuration="1m0.026894753s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.478122674 +0000 UTC m=+974.924029057" lastFinishedPulling="2026-01-28 15:33:54.095047286 +0000 UTC m=+1017.540953669" observedRunningTime="2026-01-28 15:34:09.025314115 +0000 UTC m=+1032.471220508" watchObservedRunningTime="2026-01-28 15:34:09.026894753 +0000 UTC m=+1032.472801136" Jan 28 15:34:09 crc kubenswrapper[4959]: I0128 15:34:09.068049 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z" podStartSLOduration=3.508976065 podStartE2EDuration="1m0.068025344s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.838468912 +0000 UTC m=+975.284375295" lastFinishedPulling="2026-01-28 15:34:08.397518191 +0000 UTC m=+1031.843424574" observedRunningTime="2026-01-28 15:34:09.063078752 +0000 UTC m=+1032.508985145" watchObservedRunningTime="2026-01-28 15:34:09.068025344 +0000 UTC m=+1032.513931727" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.031271 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" event={"ID":"f2627922-faf4-4e19-8084-0a10b175546b","Type":"ContainerStarted","Data":"3b4d2923fc8f82410d84d50aed5f6e2ac2accc9e973dfaaef6d93790ca7e6a1a"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.032981 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.034483 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" event={"ID":"d9f17334-8180-44ba-ab9d-28c3a24c56ee","Type":"ContainerStarted","Data":"a0269028e9223b937987e80b99cdab74b6265bfa9e8e62db3d82eaa4f80a4de3"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.034966 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.036860 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" 
event={"ID":"91dbf1cd-c113-4dfb-bb3f-28d35d7994b2","Type":"ContainerStarted","Data":"cf66c52efb2c920841059c82eabc65c9336bdd0d26d47083bcf84b51e2b069cf"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.037430 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.039344 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" event={"ID":"08b2bcb6-f6da-4a40-95c6-c225a4c145fd","Type":"ContainerStarted","Data":"d0a4c8700aa02f15405625e12a97db45268829771877ab55fcfb3138f0c3d4fd"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.039894 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.041403 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" event={"ID":"b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10","Type":"ContainerStarted","Data":"af00126bbf32a2d95b0c1898b9ac0e7429b8d085f96e94043313836593aa1436"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.041819 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.043876 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" event={"ID":"ef13bfff-4e74-42e2-96b7-c9e1dff84e92","Type":"ContainerStarted","Data":"2fe6c931ea2bd00a4d1830570e561b64600117b4076928d3d38a47ae73fc9a03"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.044240 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.045360 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" event={"ID":"f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d","Type":"ContainerStarted","Data":"282c5c5653f299ccace36e6e45d1e31cd8d52f5fd3c0c3850868d1ffe1b29f71"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.045511 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.047089 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" event={"ID":"d3950d98-83ab-4ad0-b91c-cb838ae61278","Type":"ContainerStarted","Data":"730006ad8c3e654ab8725430188d650d34d14509810a20abe48dafe841164c79"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.047286 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.059664 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" event={"ID":"9d84ca34-1566-41d9-a8aa-083d9c405581","Type":"ContainerStarted","Data":"ef7021e0f2503d1b7bf71d80c5639979c751df786841ff57309694b952550e49"} Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.060156 
4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.089243 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll" podStartSLOduration=4.386700256 podStartE2EDuration="1m1.089215187s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:12.042160253 +0000 UTC m=+975.488066636" lastFinishedPulling="2026-01-28 15:34:08.744675184 +0000 UTC m=+1032.190581567" observedRunningTime="2026-01-28 15:34:10.086150452 +0000 UTC m=+1033.532056855" watchObservedRunningTime="2026-01-28 15:34:10.089215187 +0000 UTC m=+1033.535121570" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.133895 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n" podStartSLOduration=4.512365462 podStartE2EDuration="1m1.133868384s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.925421637 +0000 UTC m=+975.371328020" lastFinishedPulling="2026-01-28 15:34:08.546924559 +0000 UTC m=+1031.992830942" observedRunningTime="2026-01-28 15:34:10.130886281 +0000 UTC m=+1033.576792664" watchObservedRunningTime="2026-01-28 15:34:10.133868384 +0000 UTC m=+1033.579774767" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.164883 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn" podStartSLOduration=4.300433959 podStartE2EDuration="1m1.164859495s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.679467848 +0000 UTC m=+975.125374231" lastFinishedPulling="2026-01-28 15:34:08.543893384 +0000 UTC m=+1031.989799767" observedRunningTime="2026-01-28 15:34:10.160661201 +0000 UTC m=+1033.606567584" watchObservedRunningTime="2026-01-28 15:34:10.164859495 +0000 UTC m=+1033.610765878" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.212393 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272" podStartSLOduration=4.680394077 podStartE2EDuration="1m1.212362221s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:12.010353022 +0000 UTC m=+975.456259405" lastFinishedPulling="2026-01-28 15:34:08.542321166 +0000 UTC m=+1031.988227549" observedRunningTime="2026-01-28 15:34:10.209436039 +0000 UTC m=+1033.655342432" watchObservedRunningTime="2026-01-28 15:34:10.212362221 +0000 UTC m=+1033.658268604" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.295417 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5" podStartSLOduration=3.959645289 podStartE2EDuration="1m1.295391099s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.210309478 +0000 UTC m=+974.656215851" lastFinishedPulling="2026-01-28 15:34:08.546055278 +0000 UTC m=+1031.991961661" observedRunningTime="2026-01-28 15:34:10.291237637 +0000 UTC m=+1033.737144030" watchObservedRunningTime="2026-01-28 15:34:10.295391099 +0000 UTC m=+1033.741297482" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.295795 4959 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd" podStartSLOduration=39.62953573 podStartE2EDuration="1m1.29578216s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:46.877612173 +0000 UTC m=+1010.323518556" lastFinishedPulling="2026-01-28 15:34:08.543858603 +0000 UTC m=+1031.989764986" observedRunningTime="2026-01-28 15:34:10.25143517 +0000 UTC m=+1033.697341563" watchObservedRunningTime="2026-01-28 15:34:10.29578216 +0000 UTC m=+1033.741688543" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.398620 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq" podStartSLOduration=4.470873243 podStartE2EDuration="1m1.398595094s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.616340798 +0000 UTC m=+975.062247181" lastFinishedPulling="2026-01-28 15:34:08.544062649 +0000 UTC m=+1031.989969032" observedRunningTime="2026-01-28 15:34:10.397571868 +0000 UTC m=+1033.843478261" watchObservedRunningTime="2026-01-28 15:34:10.398595094 +0000 UTC m=+1033.844501477" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.402773 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm" podStartSLOduration=8.512382317 podStartE2EDuration="1m1.402757196s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.594595814 +0000 UTC m=+975.040502197" lastFinishedPulling="2026-01-28 15:34:04.484970693 +0000 UTC m=+1027.930877076" observedRunningTime="2026-01-28 15:34:10.330935923 +0000 UTC m=+1033.776842316" watchObservedRunningTime="2026-01-28 15:34:10.402757196 +0000 UTC m=+1033.848663579" Jan 28 15:34:10 crc kubenswrapper[4959]: I0128 15:34:10.415054 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r" podStartSLOduration=4.748503469 podStartE2EDuration="1m1.415031807s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.876845014 +0000 UTC m=+975.322751397" lastFinishedPulling="2026-01-28 15:34:08.543373352 +0000 UTC m=+1031.989279735" observedRunningTime="2026-01-28 15:34:10.41272042 +0000 UTC m=+1033.858626823" watchObservedRunningTime="2026-01-28 15:34:10.415031807 +0000 UTC m=+1033.860938200" Jan 28 15:34:13 crc kubenswrapper[4959]: I0128 15:34:13.085410 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" event={"ID":"d5c6598b-38d8-4598-ac7e-862426f8c0d6","Type":"ContainerStarted","Data":"11314d9d32269d373f5ef15cd6d73346feb07f3abe161f7e37b95358b027e59e"} Jan 28 15:34:13 crc kubenswrapper[4959]: I0128 15:34:13.105179 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mcn29" podStartSLOduration=2.774431084 podStartE2EDuration="1m3.105079134s" podCreationTimestamp="2026-01-28 15:33:10 +0000 UTC" firstStartedPulling="2026-01-28 15:33:12.043585188 +0000 UTC m=+975.489491571" lastFinishedPulling="2026-01-28 15:34:12.374233238 +0000 UTC m=+1035.820139621" observedRunningTime="2026-01-28 15:34:13.100818289 +0000 UTC m=+1036.546724672" watchObservedRunningTime="2026-01-28 15:34:13.105079134 +0000 UTC m=+1036.550985517" Jan 28 15:34:16 
Jan 28 15:34:16 crc kubenswrapper[4959]: E0128 15:34:16.589796 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/heat-operator@sha256:429171b44a24e9e4dde46465d90a272d93b15317ea386184d6ad077cc119d3c9\\\"\"" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" podUID="47665907-f1ce-43a1-a3b0-1510bd987a4f"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.109664 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b84b46695-wfxq5"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.122881 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-vffmq"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.136600 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-765668569f-qcwbm"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.149657 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zrxc4"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.460208 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-mls2r"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.518021 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zpcjn"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.561925 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-tkl2z"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.597379 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-6d69b9c5db-xkcll"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.616667 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-z872n"
Jan 28 15:34:20 crc kubenswrapper[4959]: I0128 15:34:20.662825 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-rs272"
Jan 28 15:34:21 crc kubenswrapper[4959]: E0128 15:34:21.589972 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/ironic-operator@sha256:5f48b6af05a584d3da5c973f83195d999cc151aa0f187cabc8002cb46d60afe5\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" podUID="86f9260b-f92a-4dc1-9445-8bb3de058537"
Jan 28 15:34:21 crc kubenswrapper[4959]: I0128 15:34:21.891928 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79955696d6-nzlrd"
event={"ID":"4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a","Type":"ContainerStarted","Data":"2449ed7098a5cc89001524c01cb698589a7ae8eabb4f4a2a4ba6b9e58106a34c"} Jan 28 15:34:24 crc kubenswrapper[4959]: I0128 15:34:24.214617 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:34:24 crc kubenswrapper[4959]: I0128 15:34:24.249418 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" podStartSLOduration=39.012107502 podStartE2EDuration="1m15.2493933s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:47.539389953 +0000 UTC m=+1010.985296336" lastFinishedPulling="2026-01-28 15:34:23.776675741 +0000 UTC m=+1047.222582134" observedRunningTime="2026-01-28 15:34:24.238582323 +0000 UTC m=+1047.684488716" watchObservedRunningTime="2026-01-28 15:34:24.2493933 +0000 UTC m=+1047.695299683" Jan 28 15:34:32 crc kubenswrapper[4959]: I0128 15:34:32.371377 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8" Jan 28 15:34:33 crc kubenswrapper[4959]: I0128 15:34:33.280584 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" event={"ID":"47665907-f1ce-43a1-a3b0-1510bd987a4f","Type":"ContainerStarted","Data":"a28918d9b4ef85727b88a17d7e25a9c43cf634e99f8401a7e9fd600d67852920"} Jan 28 15:34:33 crc kubenswrapper[4959]: I0128 15:34:33.281311 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" Jan 28 15:34:33 crc kubenswrapper[4959]: I0128 15:34:33.316222 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" podStartSLOduration=2.8917208800000003 podStartE2EDuration="1m24.316200075s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.446403886 +0000 UTC m=+974.892310269" lastFinishedPulling="2026-01-28 15:34:32.870883081 +0000 UTC m=+1056.316789464" observedRunningTime="2026-01-28 15:34:33.314854302 +0000 UTC m=+1056.760760705" watchObservedRunningTime="2026-01-28 15:34:33.316200075 +0000 UTC m=+1056.762106458" Jan 28 15:34:34 crc kubenswrapper[4959]: I0128 15:34:34.290553 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" event={"ID":"86f9260b-f92a-4dc1-9445-8bb3de058537","Type":"ContainerStarted","Data":"d3e6e7839482146ecbd211d885c34815f3f5b05a91bf3a79e9c59fe07e810f36"} Jan 28 15:34:34 crc kubenswrapper[4959]: I0128 15:34:34.290950 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" Jan 28 15:34:34 crc kubenswrapper[4959]: I0128 15:34:34.306881 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" podStartSLOduration=3.07136277 podStartE2EDuration="1m25.306856619s" podCreationTimestamp="2026-01-28 15:33:09 +0000 UTC" firstStartedPulling="2026-01-28 15:33:11.171542026 +0000 UTC m=+974.617448409" lastFinishedPulling="2026-01-28 15:34:33.407035875 +0000 UTC m=+1056.852942258" 
observedRunningTime="2026-01-28 15:34:34.306288094 +0000 UTC m=+1057.752194497" watchObservedRunningTime="2026-01-28 15:34:34.306856619 +0000 UTC m=+1057.752763002" Jan 28 15:34:39 crc kubenswrapper[4959]: I0128 15:34:39.931500 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-pd5tw" Jan 28 15:34:40 crc kubenswrapper[4959]: I0128 15:34:40.076981 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-958664b5-vcpch" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.558523 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lf926"] Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.566509 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.572704 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.574382 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.580829 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lf926"] Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.581802 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.583869 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-nbq7n" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.642713 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4mkv"] Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.644498 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.649167 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51911834-44ad-4f5b-978b-0fc2099b753d-config\") pod \"dnsmasq-dns-675f4bcbfc-lf926\" (UID: \"51911834-44ad-4f5b-978b-0fc2099b753d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.649262 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb9tj\" (UniqueName: \"kubernetes.io/projected/51911834-44ad-4f5b-978b-0fc2099b753d-kube-api-access-jb9tj\") pod \"dnsmasq-dns-675f4bcbfc-lf926\" (UID: \"51911834-44ad-4f5b-978b-0fc2099b753d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.655866 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.672415 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4mkv"] Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.750574 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb9tj\" (UniqueName: \"kubernetes.io/projected/51911834-44ad-4f5b-978b-0fc2099b753d-kube-api-access-jb9tj\") pod \"dnsmasq-dns-675f4bcbfc-lf926\" (UID: \"51911834-44ad-4f5b-978b-0fc2099b753d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.750642 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.750684 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2r6m\" (UniqueName: \"kubernetes.io/projected/0cf32512-b717-422a-ba5a-96dd9ed39548-kube-api-access-q2r6m\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.750745 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51911834-44ad-4f5b-978b-0fc2099b753d-config\") pod \"dnsmasq-dns-675f4bcbfc-lf926\" (UID: \"51911834-44ad-4f5b-978b-0fc2099b753d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.750785 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-config\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.751922 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51911834-44ad-4f5b-978b-0fc2099b753d-config\") pod \"dnsmasq-dns-675f4bcbfc-lf926\" (UID: \"51911834-44ad-4f5b-978b-0fc2099b753d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 
15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.773574 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb9tj\" (UniqueName: \"kubernetes.io/projected/51911834-44ad-4f5b-978b-0fc2099b753d-kube-api-access-jb9tj\") pod \"dnsmasq-dns-675f4bcbfc-lf926\" (UID: \"51911834-44ad-4f5b-978b-0fc2099b753d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.852413 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.852568 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2r6m\" (UniqueName: \"kubernetes.io/projected/0cf32512-b717-422a-ba5a-96dd9ed39548-kube-api-access-q2r6m\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.853118 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-config\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.854195 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-config\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.854861 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.875978 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2r6m\" (UniqueName: \"kubernetes.io/projected/0cf32512-b717-422a-ba5a-96dd9ed39548-kube-api-access-q2r6m\") pod \"dnsmasq-dns-78dd6ddcc-s4mkv\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.906549 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:34:57 crc kubenswrapper[4959]: I0128 15:34:57.971899 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:34:58 crc kubenswrapper[4959]: I0128 15:34:58.240223 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lf926"] Jan 28 15:34:58 crc kubenswrapper[4959]: I0128 15:34:58.470810 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" event={"ID":"51911834-44ad-4f5b-978b-0fc2099b753d","Type":"ContainerStarted","Data":"57a5076634da648d72b3837bd444e07f36becc3f8d7c8a9ad90eb60d9cfbdf9b"} Jan 28 15:34:58 crc kubenswrapper[4959]: I0128 15:34:58.520644 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4mkv"] Jan 28 15:34:58 crc kubenswrapper[4959]: W0128 15:34:58.524807 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0cf32512_b717_422a_ba5a_96dd9ed39548.slice/crio-cb3ca695c904d560c0e59cdc638906d14e5789f3f21322b9fbe38368942ce17a WatchSource:0}: Error finding container cb3ca695c904d560c0e59cdc638906d14e5789f3f21322b9fbe38368942ce17a: Status 404 returned error can't find the container with id cb3ca695c904d560c0e59cdc638906d14e5789f3f21322b9fbe38368942ce17a Jan 28 15:34:59 crc kubenswrapper[4959]: I0128 15:34:59.479731 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" event={"ID":"0cf32512-b717-422a-ba5a-96dd9ed39548","Type":"ContainerStarted","Data":"cb3ca695c904d560c0e59cdc638906d14e5789f3f21322b9fbe38368942ce17a"} Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.464982 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lf926"] Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.533711 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-mx5pt"] Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.535236 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.540443 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-mx5pt"] Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.609437 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-config\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.609549 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcpj7\" (UniqueName: \"kubernetes.io/projected/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-kube-api-access-tcpj7\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.609618 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-dns-svc\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.719378 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-config\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.722064 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcpj7\" (UniqueName: \"kubernetes.io/projected/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-kube-api-access-tcpj7\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.722185 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-dns-svc\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.722324 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-config\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.723924 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-dns-svc\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.760699 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcpj7\" (UniqueName: 
\"kubernetes.io/projected/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-kube-api-access-tcpj7\") pod \"dnsmasq-dns-666b6646f7-mx5pt\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.836261 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4mkv"] Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.872537 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.880709 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-w7lfc"] Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.882437 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:00 crc kubenswrapper[4959]: I0128 15:35:00.887719 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-w7lfc"] Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.030600 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgts9\" (UniqueName: \"kubernetes.io/projected/b942b031-74b0-4bf7-8dae-75362fa7e393-kube-api-access-rgts9\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.031148 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.031189 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-config\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.182587 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgts9\" (UniqueName: \"kubernetes.io/projected/b942b031-74b0-4bf7-8dae-75362fa7e393-kube-api-access-rgts9\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.182660 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.182694 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-config\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.183796 4959 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-config\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.184477 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.239161 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgts9\" (UniqueName: \"kubernetes.io/projected/b942b031-74b0-4bf7-8dae-75362fa7e393-kube-api-access-rgts9\") pod \"dnsmasq-dns-57d769cc4f-w7lfc\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.403210 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-mx5pt"] Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.516897 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" event={"ID":"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae","Type":"ContainerStarted","Data":"a8725f279d610e155bc12bbd225a0af5075591745f7da01f5319a2ec0875c981"} Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.525076 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.659470 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.662025 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.669260 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.669683 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.670786 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.671128 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.671267 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-n5sdm" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.671389 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.671501 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.680789 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.794255 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.794331 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f86gs\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-kube-api-access-f86gs\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.794354 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.794376 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a81258f3-e48f-44f0-93d9-02e58302683a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.794403 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.794514 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-config-data\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.794836 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a81258f3-e48f-44f0-93d9-02e58302683a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.795033 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.795149 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.795380 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.795570 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897467 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f86gs\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-kube-api-access-f86gs\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897526 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897563 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a81258f3-e48f-44f0-93d9-02e58302683a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897600 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " 
pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897630 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-config-data\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897664 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a81258f3-e48f-44f0-93d9-02e58302683a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897694 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897722 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897773 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897819 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.897845 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.898397 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.898478 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.898896 4959 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.899947 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.900382 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.900679 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a81258f3-e48f-44f0-93d9-02e58302683a-config-data\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.928437 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a81258f3-e48f-44f0-93d9-02e58302683a-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.929315 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.931070 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.932796 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a81258f3-e48f-44f0-93d9-02e58302683a-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.937969 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f86gs\" (UniqueName: \"kubernetes.io/projected/a81258f3-e48f-44f0-93d9-02e58302683a-kube-api-access-f86gs\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:01 crc kubenswrapper[4959]: I0128 15:35:01.960671 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"a81258f3-e48f-44f0-93d9-02e58302683a\") " pod="openstack/rabbitmq-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.022060 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.052722 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.054316 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.065183 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.065684 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.065809 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.066234 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.066815 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-sk92j" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.066992 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.067195 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.080548 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.206263 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.206739 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.206777 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.206801 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.206823 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.206842 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ee1fc53a-3817-4c94-8bd6-569c089c02cb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.206964 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.206987 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.207019 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.207133 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjp6k\" (UniqueName: \"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-kube-api-access-jjp6k\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.207168 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ee1fc53a-3817-4c94-8bd6-569c089c02cb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.308476 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjp6k\" (UniqueName: \"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-kube-api-access-jjp6k\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.308526 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ee1fc53a-3817-4c94-8bd6-569c089c02cb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309040 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309068 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309311 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309336 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309354 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309372 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ee1fc53a-3817-4c94-8bd6-569c089c02cb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309413 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309443 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309488 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.309949 4959 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") device mount path 
\"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.310295 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.310820 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.314708 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.315512 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ee1fc53a-3817-4c94-8bd6-569c089c02cb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.315882 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.316079 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.316938 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ee1fc53a-3817-4c94-8bd6-569c089c02cb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.319804 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.322323 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ee1fc53a-3817-4c94-8bd6-569c089c02cb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.338917 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jjp6k\" (UniqueName: \"kubernetes.io/projected/ee1fc53a-3817-4c94-8bd6-569c089c02cb-kube-api-access-jjp6k\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.362232 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ee1fc53a-3817-4c94-8bd6-569c089c02cb\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.368466 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-w7lfc"] Jan 28 15:35:02 crc kubenswrapper[4959]: W0128 15:35:02.378671 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb942b031_74b0_4bf7_8dae_75362fa7e393.slice/crio-edba64655d165ac8b69502a13ca2aada7efaa5f54dbe97ad20ba587dc2a0a0ac WatchSource:0}: Error finding container edba64655d165ac8b69502a13ca2aada7efaa5f54dbe97ad20ba587dc2a0a0ac: Status 404 returned error can't find the container with id edba64655d165ac8b69502a13ca2aada7efaa5f54dbe97ad20ba587dc2a0a0ac Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.405957 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.534465 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" event={"ID":"b942b031-74b0-4bf7-8dae-75362fa7e393","Type":"ContainerStarted","Data":"edba64655d165ac8b69502a13ca2aada7efaa5f54dbe97ad20ba587dc2a0a0ac"} Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.719601 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 15:35:02 crc kubenswrapper[4959]: I0128 15:35:02.909581 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.070132 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.084387 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.084593 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.090265 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.090536 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-dkfmb" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.090598 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.093555 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.102758 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.133492 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.133545 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-kolla-config\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.133594 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsxtn\" (UniqueName: \"kubernetes.io/projected/d7f92998-4485-45fa-b5c5-cbb5211799c6-kube-api-access-tsxtn\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.133651 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f92998-4485-45fa-b5c5-cbb5211799c6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.133678 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d7f92998-4485-45fa-b5c5-cbb5211799c6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.133707 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-config-data-default\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.133784 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-operator-scripts\") pod \"openstack-galera-0\" 
(UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.133830 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f92998-4485-45fa-b5c5-cbb5211799c6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.238842 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f92998-4485-45fa-b5c5-cbb5211799c6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.238911 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.238927 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-kolla-config\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.238969 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsxtn\" (UniqueName: \"kubernetes.io/projected/d7f92998-4485-45fa-b5c5-cbb5211799c6-kube-api-access-tsxtn\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.239451 4959 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.239908 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-kolla-config\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.243855 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f92998-4485-45fa-b5c5-cbb5211799c6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.243917 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d7f92998-4485-45fa-b5c5-cbb5211799c6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.243967 4959 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-config-data-default\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.244135 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.245673 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-operator-scripts\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.245901 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d7f92998-4485-45fa-b5c5-cbb5211799c6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.246524 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d7f92998-4485-45fa-b5c5-cbb5211799c6-config-data-default\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.256668 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f92998-4485-45fa-b5c5-cbb5211799c6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.272674 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.285298 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f92998-4485-45fa-b5c5-cbb5211799c6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.289049 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsxtn\" (UniqueName: \"kubernetes.io/projected/d7f92998-4485-45fa-b5c5-cbb5211799c6-kube-api-access-tsxtn\") pod \"openstack-galera-0\" (UID: \"d7f92998-4485-45fa-b5c5-cbb5211799c6\") " pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.417491 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.551672 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a81258f3-e48f-44f0-93d9-02e58302683a","Type":"ContainerStarted","Data":"a37a38037096d9c92af033114724bee11e8c705468a9693c6ca634ced67307b3"} Jan 28 15:35:03 crc kubenswrapper[4959]: I0128 15:35:03.554673 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ee1fc53a-3817-4c94-8bd6-569c089c02cb","Type":"ContainerStarted","Data":"33125be7492a06e150b52da300e839a46f4d825e0ce894a2b39505d42bf9be0f"} Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.362624 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.365159 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.365272 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.374626 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.374873 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-cbmtk" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.375001 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.375354 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.471394 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.471559 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/54187dbe-318c-4070-9771-d3d98fc10457-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.471697 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsm96\" (UniqueName: \"kubernetes.io/projected/54187dbe-318c-4070-9771-d3d98fc10457-kube-api-access-wsm96\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.472009 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 
15:35:04.472244 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.472312 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.472402 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54187dbe-318c-4070-9771-d3d98fc10457-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.472506 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/54187dbe-318c-4070-9771-d3d98fc10457-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.575135 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/54187dbe-318c-4070-9771-d3d98fc10457-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.575561 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.575591 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/54187dbe-318c-4070-9771-d3d98fc10457-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.575626 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsm96\" (UniqueName: \"kubernetes.io/projected/54187dbe-318c-4070-9771-d3d98fc10457-kube-api-access-wsm96\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.575673 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.575713 
4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.575737 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.575758 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54187dbe-318c-4070-9771-d3d98fc10457-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.576741 4959 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.576990 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/54187dbe-318c-4070-9771-d3d98fc10457-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.577773 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.578476 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.579690 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54187dbe-318c-4070-9771-d3d98fc10457-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.586517 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54187dbe-318c-4070-9771-d3d98fc10457-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.597791 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/54187dbe-318c-4070-9771-d3d98fc10457-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.611661 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.629474 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsm96\" (UniqueName: \"kubernetes.io/projected/54187dbe-318c-4070-9771-d3d98fc10457-kube-api-access-wsm96\") pod \"openstack-cell1-galera-0\" (UID: \"54187dbe-318c-4070-9771-d3d98fc10457\") " pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.722850 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.810625 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.835520 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.837219 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.843941 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.844276 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.844348 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-k4nfj" Jan 28 15:35:04 crc kubenswrapper[4959]: I0128 15:35:04.847819 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.001017 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e7bad372-1c02-48b3-838d-7d5e97b30b57-kolla-config\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.001099 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrqm4\" (UniqueName: \"kubernetes.io/projected/e7bad372-1c02-48b3-838d-7d5e97b30b57-kube-api-access-xrqm4\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.001166 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7bad372-1c02-48b3-838d-7d5e97b30b57-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.001252 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7bad372-1c02-48b3-838d-7d5e97b30b57-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.001279 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7bad372-1c02-48b3-838d-7d5e97b30b57-config-data\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.186252 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7bad372-1c02-48b3-838d-7d5e97b30b57-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.186310 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7bad372-1c02-48b3-838d-7d5e97b30b57-config-data\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.186365 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e7bad372-1c02-48b3-838d-7d5e97b30b57-kolla-config\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.186392 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrqm4\" (UniqueName: \"kubernetes.io/projected/e7bad372-1c02-48b3-838d-7d5e97b30b57-kube-api-access-xrqm4\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.186433 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7bad372-1c02-48b3-838d-7d5e97b30b57-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.188243 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7bad372-1c02-48b3-838d-7d5e97b30b57-config-data\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.192209 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e7bad372-1c02-48b3-838d-7d5e97b30b57-kolla-config\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.192726 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7bad372-1c02-48b3-838d-7d5e97b30b57-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.199498 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e7bad372-1c02-48b3-838d-7d5e97b30b57-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.229254 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrqm4\" (UniqueName: \"kubernetes.io/projected/e7bad372-1c02-48b3-838d-7d5e97b30b57-kube-api-access-xrqm4\") pod \"memcached-0\" (UID: \"e7bad372-1c02-48b3-838d-7d5e97b30b57\") " pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.476390 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.614079 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d7f92998-4485-45fa-b5c5-cbb5211799c6","Type":"ContainerStarted","Data":"ef7942b3a2b66de3e24a4ce1724fc7e5c29890e848a59d4ea3aecd4f46653a3e"} Jan 28 15:35:05 crc kubenswrapper[4959]: I0128 15:35:05.631648 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 15:35:05 crc kubenswrapper[4959]: W0128 15:35:05.657499 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54187dbe_318c_4070_9771_d3d98fc10457.slice/crio-89672de87d0208d1bd72b0865baee93baa62cdcc4764a07dd1715b650b6c3868 WatchSource:0}: Error finding container 89672de87d0208d1bd72b0865baee93baa62cdcc4764a07dd1715b650b6c3868: Status 404 returned error can't find the container with id 89672de87d0208d1bd72b0865baee93baa62cdcc4764a07dd1715b650b6c3868 Jan 28 15:35:06 crc kubenswrapper[4959]: I0128 15:35:06.121503 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 28 15:35:06 crc kubenswrapper[4959]: W0128 15:35:06.178985 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7bad372_1c02_48b3_838d_7d5e97b30b57.slice/crio-0729a9c3ed3427b68da761d889ba518224461db7ea5d0838bb57489d56b4d304 WatchSource:0}: Error finding container 0729a9c3ed3427b68da761d889ba518224461db7ea5d0838bb57489d56b4d304: Status 404 returned error can't find the container with id 0729a9c3ed3427b68da761d889ba518224461db7ea5d0838bb57489d56b4d304 Jan 28 15:35:06 crc kubenswrapper[4959]: I0128 15:35:06.677698 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"54187dbe-318c-4070-9771-d3d98fc10457","Type":"ContainerStarted","Data":"89672de87d0208d1bd72b0865baee93baa62cdcc4764a07dd1715b650b6c3868"} Jan 28 15:35:06 crc kubenswrapper[4959]: I0128 15:35:06.703235 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e7bad372-1c02-48b3-838d-7d5e97b30b57","Type":"ContainerStarted","Data":"0729a9c3ed3427b68da761d889ba518224461db7ea5d0838bb57489d56b4d304"} Jan 28 15:35:07 crc kubenswrapper[4959]: I0128 15:35:07.254437 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 15:35:07 crc kubenswrapper[4959]: I0128 15:35:07.256039 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 15:35:07 crc kubenswrapper[4959]: I0128 15:35:07.260149 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-dxx58" Jan 28 15:35:07 crc kubenswrapper[4959]: I0128 15:35:07.261682 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 15:35:07 crc kubenswrapper[4959]: I0128 15:35:07.393854 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnt92\" (UniqueName: \"kubernetes.io/projected/4c05f0db-12b1-4491-8680-2df359888603-kube-api-access-wnt92\") pod \"kube-state-metrics-0\" (UID: \"4c05f0db-12b1-4491-8680-2df359888603\") " pod="openstack/kube-state-metrics-0" Jan 28 15:35:07 crc kubenswrapper[4959]: I0128 15:35:07.495744 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnt92\" (UniqueName: \"kubernetes.io/projected/4c05f0db-12b1-4491-8680-2df359888603-kube-api-access-wnt92\") pod \"kube-state-metrics-0\" (UID: \"4c05f0db-12b1-4491-8680-2df359888603\") " pod="openstack/kube-state-metrics-0" Jan 28 15:35:07 crc kubenswrapper[4959]: I0128 15:35:07.713132 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnt92\" (UniqueName: \"kubernetes.io/projected/4c05f0db-12b1-4491-8680-2df359888603-kube-api-access-wnt92\") pod \"kube-state-metrics-0\" (UID: \"4c05f0db-12b1-4491-8680-2df359888603\") " pod="openstack/kube-state-metrics-0" Jan 28 15:35:07 crc kubenswrapper[4959]: I0128 15:35:07.968131 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 15:35:08 crc kubenswrapper[4959]: I0128 15:35:08.994498 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 15:35:09 crc kubenswrapper[4959]: I0128 15:35:09.774288 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4c05f0db-12b1-4491-8680-2df359888603","Type":"ContainerStarted","Data":"b32432d1527b9de93f80ee6a127c260ab33d6232e391e55923df9932d7cdff90"} Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.254662 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.256817 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.260243 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.260280 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.260640 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.260789 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.260912 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-9ssd9" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.274079 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.446789 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.446933 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.446967 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sw77c\" (UniqueName: \"kubernetes.io/projected/72dccd04-5057-4f99-942e-750ca1f7b3b5-kube-api-access-sw77c\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.446991 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/72dccd04-5057-4f99-942e-750ca1f7b3b5-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.447016 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.447053 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/72dccd04-5057-4f99-942e-750ca1f7b3b5-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.447117 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.447199 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72dccd04-5057-4f99-942e-750ca1f7b3b5-config\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.549052 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.549170 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.549207 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sw77c\" (UniqueName: \"kubernetes.io/projected/72dccd04-5057-4f99-942e-750ca1f7b3b5-kube-api-access-sw77c\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.549247 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/72dccd04-5057-4f99-942e-750ca1f7b3b5-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.549285 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.549326 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/72dccd04-5057-4f99-942e-750ca1f7b3b5-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.549386 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.549425 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72dccd04-5057-4f99-942e-750ca1f7b3b5-config\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 
15:35:10.550943 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72dccd04-5057-4f99-942e-750ca1f7b3b5-config\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.553950 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/72dccd04-5057-4f99-942e-750ca1f7b3b5-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.562747 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/72dccd04-5057-4f99-942e-750ca1f7b3b5-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.568682 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.568806 4959 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.570460 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.574950 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/72dccd04-5057-4f99-942e-750ca1f7b3b5-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.617091 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sw77c\" (UniqueName: \"kubernetes.io/projected/72dccd04-5057-4f99-942e-750ca1f7b3b5-kube-api-access-sw77c\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.642184 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"72dccd04-5057-4f99-942e-750ca1f7b3b5\") " pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:10 crc kubenswrapper[4959]: I0128 15:35:10.892640 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.197843 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-bp544"] Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.199537 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.204706 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.205057 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.205256 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-bt799" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.219769 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-zf2dp"] Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.222301 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.237609 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-bp544"] Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.285504 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-zf2dp"] Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.335887 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-lib\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.335943 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-run-ovn\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.335979 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-run\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.336100 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-run\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.336219 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-etc-ovs\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.336244 
4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8171da25-3ff2-431c-a3a6-482426500111-scripts\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.336340 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6fh8\" (UniqueName: \"kubernetes.io/projected/8171da25-3ff2-431c-a3a6-482426500111-kube-api-access-l6fh8\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.336574 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-combined-ca-bundle\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.336801 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-ovn-controller-tls-certs\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.336996 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-scripts\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.337071 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-log\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.337148 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-log-ovn\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.337178 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndt97\" (UniqueName: \"kubernetes.io/projected/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-kube-api-access-ndt97\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478476 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-scripts\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478547 4959 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-log\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478572 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-log-ovn\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478595 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndt97\" (UniqueName: \"kubernetes.io/projected/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-kube-api-access-ndt97\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478622 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-lib\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478645 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-run-ovn\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478672 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-run\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478691 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-run\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478717 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-etc-ovs\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478756 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8171da25-3ff2-431c-a3a6-482426500111-scripts\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478787 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6fh8\" (UniqueName: \"kubernetes.io/projected/8171da25-3ff2-431c-a3a6-482426500111-kube-api-access-l6fh8\") pod \"ovn-controller-ovs-zf2dp\" (UID: 
\"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478861 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-combined-ca-bundle\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.478905 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-ovn-controller-tls-certs\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.480563 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-run-ovn\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.480828 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-etc-ovs\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.480979 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-run\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.481151 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-lib\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.481370 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/8171da25-3ff2-431c-a3a6-482426500111-var-log\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.483316 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-log-ovn\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.489262 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-scripts\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.489661 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/8171da25-3ff2-431c-a3a6-482426500111-scripts\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.490571 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-var-run\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.499280 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-ovn-controller-tls-certs\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.500242 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-combined-ca-bundle\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.509964 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6fh8\" (UniqueName: \"kubernetes.io/projected/8171da25-3ff2-431c-a3a6-482426500111-kube-api-access-l6fh8\") pod \"ovn-controller-ovs-zf2dp\" (UID: \"8171da25-3ff2-431c-a3a6-482426500111\") " pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.510492 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndt97\" (UniqueName: \"kubernetes.io/projected/93fc210e-4599-4436-b8e6-a20a8c5cd2b4-kube-api-access-ndt97\") pod \"ovn-controller-bp544\" (UID: \"93fc210e-4599-4436-b8e6-a20a8c5cd2b4\") " pod="openstack/ovn-controller-bp544" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.577892 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:35:12 crc kubenswrapper[4959]: I0128 15:35:12.578732 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-bp544" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.381594 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.388355 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.391869 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.392425 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.393177 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.393310 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-b5bv4" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.398841 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.584947 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3e4ce542-8a53-4fea-b644-4ccb423a313b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.585011 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.585089 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e4ce542-8a53-4fea-b644-4ccb423a313b-config\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.585146 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.585200 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e4ce542-8a53-4fea-b644-4ccb423a313b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.585280 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.585308 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " 
pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.585334 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48sks\" (UniqueName: \"kubernetes.io/projected/3e4ce542-8a53-4fea-b644-4ccb423a313b-kube-api-access-48sks\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.687077 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3e4ce542-8a53-4fea-b644-4ccb423a313b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.687135 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.687171 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e4ce542-8a53-4fea-b644-4ccb423a313b-config\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.687190 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.687219 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e4ce542-8a53-4fea-b644-4ccb423a313b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.687262 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.687284 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.687303 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48sks\" (UniqueName: \"kubernetes.io/projected/3e4ce542-8a53-4fea-b644-4ccb423a313b-kube-api-access-48sks\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.688191 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/3e4ce542-8a53-4fea-b644-4ccb423a313b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.688759 4959 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.694682 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3e4ce542-8a53-4fea-b644-4ccb423a313b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.699952 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.702613 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.764471 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.786560 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48sks\" (UniqueName: \"kubernetes.io/projected/3e4ce542-8a53-4fea-b644-4ccb423a313b-kube-api-access-48sks\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.810903 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e4ce542-8a53-4fea-b644-4ccb423a313b-config\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:14 crc kubenswrapper[4959]: I0128 15:35:14.816409 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e4ce542-8a53-4fea-b644-4ccb423a313b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"3e4ce542-8a53-4fea-b644-4ccb423a313b\") " pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:15 crc kubenswrapper[4959]: I0128 15:35:15.024316 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 28 15:35:28 crc kubenswrapper[4959]: E0128 15:35:28.695170 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Jan 28 15:35:28 crc kubenswrapper[4959]: E0128 15:35:28.696206 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tsxtn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(d7f92998-4485-45fa-b5c5-cbb5211799c6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:35:28 crc kubenswrapper[4959]: E0128 15:35:28.698331 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="d7f92998-4485-45fa-b5c5-cbb5211799c6" Jan 28 15:35:29 crc kubenswrapper[4959]: E0128 15:35:29.253818 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="d7f92998-4485-45fa-b5c5-cbb5211799c6" Jan 28 15:35:30 crc kubenswrapper[4959]: E0128 15:35:30.107979 4959 log.go:32] "PullImage from image service failed" err="rpc error: 
code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 28 15:35:30 crc kubenswrapper[4959]: E0128 15:35:30.108392 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jjp6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(ee1fc53a-3817-4c94-8bd6-569c089c02cb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:35:30 crc kubenswrapper[4959]: E0128 15:35:30.109735 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="ee1fc53a-3817-4c94-8bd6-569c089c02cb" Jan 28 15:35:30 crc kubenswrapper[4959]: E0128 15:35:30.266011 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="ee1fc53a-3817-4c94-8bd6-569c089c02cb" Jan 28 15:35:31 crc kubenswrapper[4959]: E0128 15:35:31.177079 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Jan 28 15:35:31 crc kubenswrapper[4959]: E0128 15:35:31.177906 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n54hfbh64bh5b5hc9h5dh64h55fh666h65h584hc5h66h5bdh7h654h655h84h8chd6h5cdh566h54fhc6h77h5d7h7hc9h559h5fbh9dhfcq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xrqm4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(e7bad372-1c02-48b3-838d-7d5e97b30b57): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:35:31 crc kubenswrapper[4959]: E0128 15:35:31.179245 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="e7bad372-1c02-48b3-838d-7d5e97b30b57" Jan 28 15:35:31 crc kubenswrapper[4959]: E0128 15:35:31.228990 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 28 15:35:31 crc kubenswrapper[4959]: E0128 15:35:31.229256 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f86gs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(a81258f3-e48f-44f0-93d9-02e58302683a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:35:31 crc kubenswrapper[4959]: E0128 15:35:31.230867 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="a81258f3-e48f-44f0-93d9-02e58302683a" Jan 28 15:35:31 crc kubenswrapper[4959]: E0128 15:35:31.270601 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="a81258f3-e48f-44f0-93d9-02e58302683a" Jan 28 15:35:31 crc kubenswrapper[4959]: E0128 15:35:31.270847 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="e7bad372-1c02-48b3-838d-7d5e97b30b57" Jan 28 15:35:32 crc kubenswrapper[4959]: I0128 15:35:32.217406 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-zf2dp"] Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.201116 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-24g5r"] Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.202773 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.206222 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.220316 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-24g5r"] Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.316923 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-ovs-rundir\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.317008 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-combined-ca-bundle\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.317150 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxbsb\" (UniqueName: \"kubernetes.io/projected/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-kube-api-access-fxbsb\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.317319 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-ovn-rundir\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.317778 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-config\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.317841 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.419781 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-ovs-rundir\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.419846 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-combined-ca-bundle\") pod \"ovn-controller-metrics-24g5r\" (UID: 
\"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.419910 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxbsb\" (UniqueName: \"kubernetes.io/projected/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-kube-api-access-fxbsb\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.419962 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-ovn-rundir\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.420030 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-config\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.420057 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.420382 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-ovs-rundir\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.420778 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-ovn-rundir\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.421761 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-config\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.431474 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-combined-ca-bundle\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.437213 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 
15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.446717 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxbsb\" (UniqueName: \"kubernetes.io/projected/f9cf2a2e-c773-4fcc-86ea-01ed47c305cb-kube-api-access-fxbsb\") pod \"ovn-controller-metrics-24g5r\" (UID: \"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb\") " pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:34 crc kubenswrapper[4959]: I0128 15:35:34.545574 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-24g5r" Jan 28 15:35:44 crc kubenswrapper[4959]: W0128 15:35:44.706081 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8171da25_3ff2_431c_a3a6_482426500111.slice/crio-c60dbda9499d8d77fc12cb9b02ed106e992aa5f0c439c0492e5c4139eb4ee42b WatchSource:0}: Error finding container c60dbda9499d8d77fc12cb9b02ed106e992aa5f0c439c0492e5c4139eb4ee42b: Status 404 returned error can't find the container with id c60dbda9499d8d77fc12cb9b02ed106e992aa5f0c439c0492e5c4139eb4ee42b Jan 28 15:35:45 crc kubenswrapper[4959]: I0128 15:35:45.401755 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zf2dp" event={"ID":"8171da25-3ff2-431c-a3a6-482426500111","Type":"ContainerStarted","Data":"c60dbda9499d8d77fc12cb9b02ed106e992aa5f0c439c0492e5c4139eb4ee42b"} Jan 28 15:35:46 crc kubenswrapper[4959]: E0128 15:35:46.311415 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 15:35:46 crc kubenswrapper[4959]: E0128 15:35:46.312259 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tcpj7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-mx5pt_openstack(e1851d07-f0ce-46bf-88cb-8a4cc94e04ae): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:35:46 crc kubenswrapper[4959]: E0128 15:35:46.313459 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" Jan 28 15:35:46 crc kubenswrapper[4959]: E0128 15:35:46.413154 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" Jan 28 15:35:46 crc kubenswrapper[4959]: E0128 15:35:46.571754 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 15:35:46 crc kubenswrapper[4959]: E0128 15:35:46.572086 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rgts9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-w7lfc_openstack(b942b031-74b0-4bf7-8dae-75362fa7e393): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:35:46 crc kubenswrapper[4959]: E0128 15:35:46.573621 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" podUID="b942b031-74b0-4bf7-8dae-75362fa7e393" Jan 28 15:35:47 crc kubenswrapper[4959]: E0128 15:35:47.717074 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" podUID="b942b031-74b0-4bf7-8dae-75362fa7e393" Jan 28 15:35:47 crc kubenswrapper[4959]: I0128 15:35:47.876172 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.034425 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-bp544"] Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.113636 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.159481 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-24g5r"] Jan 28 15:35:48 crc kubenswrapper[4959]: W0128 15:35:48.389668 4959 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod72dccd04_5057_4f99_942e_750ca1f7b3b5.slice/crio-456cb2f58b26a56e56b011f3aee8f521ce03e2dda11e72cd1c25aadb3ebd46d5 WatchSource:0}: Error finding container 456cb2f58b26a56e56b011f3aee8f521ce03e2dda11e72cd1c25aadb3ebd46d5: Status 404 returned error can't find the container with id 456cb2f58b26a56e56b011f3aee8f521ce03e2dda11e72cd1c25aadb3ebd46d5 Jan 28 15:35:48 crc kubenswrapper[4959]: W0128 15:35:48.390396 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93fc210e_4599_4436_b8e6_a20a8c5cd2b4.slice/crio-f711f8c8efb974cbf5e6b78dcbef1668b230567c06a54e90d413ebde53231dbe WatchSource:0}: Error finding container f711f8c8efb974cbf5e6b78dcbef1668b230567c06a54e90d413ebde53231dbe: Status 404 returned error can't find the container with id f711f8c8efb974cbf5e6b78dcbef1668b230567c06a54e90d413ebde53231dbe Jan 28 15:35:48 crc kubenswrapper[4959]: W0128 15:35:48.407898 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e4ce542_8a53_4fea_b644_4ccb423a313b.slice/crio-143a72a1f3f6583acd771fcef29f37c3acdeb5d52c21a84bf71e2e7e6ed2d185 WatchSource:0}: Error finding container 143a72a1f3f6583acd771fcef29f37c3acdeb5d52c21a84bf71e2e7e6ed2d185: Status 404 returned error can't find the container with id 143a72a1f3f6583acd771fcef29f37c3acdeb5d52c21a84bf71e2e7e6ed2d185 Jan 28 15:35:48 crc kubenswrapper[4959]: W0128 15:35:48.410846 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9cf2a2e_c773_4fcc_86ea_01ed47c305cb.slice/crio-8a609cfd6d946458a05116bb2be2bdc5dba2c97660665827e7ab27b88ae165ba WatchSource:0}: Error finding container 8a609cfd6d946458a05116bb2be2bdc5dba2c97660665827e7ab27b88ae165ba: Status 404 returned error can't find the container with id 8a609cfd6d946458a05116bb2be2bdc5dba2c97660665827e7ab27b88ae165ba Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.726258 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-24g5r" event={"ID":"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb","Type":"ContainerStarted","Data":"8a609cfd6d946458a05116bb2be2bdc5dba2c97660665827e7ab27b88ae165ba"} Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.728458 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"3e4ce542-8a53-4fea-b644-4ccb423a313b","Type":"ContainerStarted","Data":"143a72a1f3f6583acd771fcef29f37c3acdeb5d52c21a84bf71e2e7e6ed2d185"} Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.730574 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d7f92998-4485-45fa-b5c5-cbb5211799c6","Type":"ContainerStarted","Data":"4986b31572618b617829532599629630e2176f788ac4bdf728ea70b398edb985"} Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.735398 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"54187dbe-318c-4070-9771-d3d98fc10457","Type":"ContainerStarted","Data":"868d42fcbd9b9c55b3ed9428af6f061065dcc539d4d730024cd849c72b93fc14"} Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.737086 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"72dccd04-5057-4f99-942e-750ca1f7b3b5","Type":"ContainerStarted","Data":"456cb2f58b26a56e56b011f3aee8f521ce03e2dda11e72cd1c25aadb3ebd46d5"} Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.738628 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e7bad372-1c02-48b3-838d-7d5e97b30b57","Type":"ContainerStarted","Data":"5687278fee5e9ac2d3e5ec2606288fef5bcf461fde98b1fc4e86c0d47056dba3"} Jan 28 15:35:48 crc kubenswrapper[4959]: I0128 15:35:48.739821 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-bp544" event={"ID":"93fc210e-4599-4436-b8e6-a20a8c5cd2b4","Type":"ContainerStarted","Data":"f711f8c8efb974cbf5e6b78dcbef1668b230567c06a54e90d413ebde53231dbe"} Jan 28 15:35:49 crc kubenswrapper[4959]: E0128 15:35:49.103933 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 15:35:49 crc kubenswrapper[4959]: E0128 15:35:49.104251 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q2r6m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-s4mkv_openstack(0cf32512-b717-422a-ba5a-96dd9ed39548): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:35:49 crc kubenswrapper[4959]: E0128 15:35:49.105504 4959 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" podUID="0cf32512-b717-422a-ba5a-96dd9ed39548" Jan 28 15:35:49 crc kubenswrapper[4959]: I0128 15:35:49.748274 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 28 15:35:49 crc kubenswrapper[4959]: I0128 15:35:49.801545 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=5.364713789 podStartE2EDuration="45.801407424s" podCreationTimestamp="2026-01-28 15:35:04 +0000 UTC" firstStartedPulling="2026-01-28 15:35:06.183654951 +0000 UTC m=+1089.629561334" lastFinishedPulling="2026-01-28 15:35:46.620348596 +0000 UTC m=+1130.066254969" observedRunningTime="2026-01-28 15:35:49.78985691 +0000 UTC m=+1133.235763293" watchObservedRunningTime="2026-01-28 15:35:49.801407424 +0000 UTC m=+1133.247313807" Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.297955 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.405241 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-dns-svc\") pod \"0cf32512-b717-422a-ba5a-96dd9ed39548\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.405398 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-config\") pod \"0cf32512-b717-422a-ba5a-96dd9ed39548\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.405551 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2r6m\" (UniqueName: \"kubernetes.io/projected/0cf32512-b717-422a-ba5a-96dd9ed39548-kube-api-access-q2r6m\") pod \"0cf32512-b717-422a-ba5a-96dd9ed39548\" (UID: \"0cf32512-b717-422a-ba5a-96dd9ed39548\") " Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.405942 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0cf32512-b717-422a-ba5a-96dd9ed39548" (UID: "0cf32512-b717-422a-ba5a-96dd9ed39548"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.406069 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-config" (OuterVolumeSpecName: "config") pod "0cf32512-b717-422a-ba5a-96dd9ed39548" (UID: "0cf32512-b717-422a-ba5a-96dd9ed39548"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.412917 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cf32512-b717-422a-ba5a-96dd9ed39548-kube-api-access-q2r6m" (OuterVolumeSpecName: "kube-api-access-q2r6m") pod "0cf32512-b717-422a-ba5a-96dd9ed39548" (UID: "0cf32512-b717-422a-ba5a-96dd9ed39548"). InnerVolumeSpecName "kube-api-access-q2r6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.507716 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2r6m\" (UniqueName: \"kubernetes.io/projected/0cf32512-b717-422a-ba5a-96dd9ed39548-kube-api-access-q2r6m\") on node \"crc\" DevicePath \"\"" Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.507763 4959 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 15:35:50 crc kubenswrapper[4959]: I0128 15:35:50.507773 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cf32512-b717-422a-ba5a-96dd9ed39548-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:35:51 crc kubenswrapper[4959]: I0128 15:35:50.761669 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" Jan 28 15:35:51 crc kubenswrapper[4959]: I0128 15:35:51.059605 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-s4mkv" event={"ID":"0cf32512-b717-422a-ba5a-96dd9ed39548","Type":"ContainerDied","Data":"cb3ca695c904d560c0e59cdc638906d14e5789f3f21322b9fbe38368942ce17a"} Jan 28 15:35:51 crc kubenswrapper[4959]: I0128 15:35:51.177096 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4mkv"] Jan 28 15:35:51 crc kubenswrapper[4959]: I0128 15:35:51.191332 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4mkv"] Jan 28 15:35:51 crc kubenswrapper[4959]: E0128 15:35:51.846126 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 15:35:51 crc kubenswrapper[4959]: E0128 15:35:51.846309 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jb9tj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-lf926_openstack(51911834-44ad-4f5b-978b-0fc2099b753d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:35:51 crc kubenswrapper[4959]: E0128 15:35:51.847500 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" podUID="51911834-44ad-4f5b-978b-0fc2099b753d" Jan 28 15:35:52 crc kubenswrapper[4959]: I0128 15:35:52.599813 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cf32512-b717-422a-ba5a-96dd9ed39548" path="/var/lib/kubelet/pods/0cf32512-b717-422a-ba5a-96dd9ed39548/volumes" Jan 28 15:35:55 crc kubenswrapper[4959]: I0128 15:35:55.482297 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.601665 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-w7lfc"] Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.645512 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-d4zkj"] Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.647739 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.703264 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-d4zkj"] Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.764412 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.765459 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-config\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.765568 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwlv2\" (UniqueName: \"kubernetes.io/projected/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-kube-api-access-vwlv2\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.874816 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-config\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.875404 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwlv2\" (UniqueName: \"kubernetes.io/projected/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-kube-api-access-vwlv2\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.875490 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.876071 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-config\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.876658 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.919921 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwlv2\" (UniqueName: 
\"kubernetes.io/projected/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-kube-api-access-vwlv2\") pod \"dnsmasq-dns-7cb5889db5-d4zkj\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") " pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:57 crc kubenswrapper[4959]: I0128 15:35:57.979706 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.672430 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.678875 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.685759 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-5z2kf" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.686075 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.686261 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.686446 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.709349 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.798710 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.798791 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c21863c-592f-436a-8fe2-06b0f78b7755-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.798851 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.798873 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2c21863c-592f-436a-8fe2-06b0f78b7755-lock\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.799021 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6b6w\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-kube-api-access-f6b6w\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.799176 4959 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2c21863c-592f-436a-8fe2-06b0f78b7755-cache\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.900708 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2c21863c-592f-436a-8fe2-06b0f78b7755-lock\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.900781 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6b6w\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-kube-api-access-f6b6w\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.900827 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2c21863c-592f-436a-8fe2-06b0f78b7755-cache\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.900884 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.900914 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c21863c-592f-436a-8fe2-06b0f78b7755-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.901285 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2c21863c-592f-436a-8fe2-06b0f78b7755-lock\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.901415 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2c21863c-592f-436a-8fe2-06b0f78b7755-cache\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.901334 4959 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: E0128 15:35:58.901612 4959 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 15:35:58 crc kubenswrapper[4959]: E0128 15:35:58.901653 4959 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 15:35:58 crc kubenswrapper[4959]: E0128 15:35:58.901729 4959 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift podName:2c21863c-592f-436a-8fe2-06b0f78b7755 nodeName:}" failed. No retries permitted until 2026-01-28 15:35:59.401703804 +0000 UTC m=+1142.847610357 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift") pod "swift-storage-0" (UID: "2c21863c-592f-436a-8fe2-06b0f78b7755") : configmap "swift-ring-files" not found Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.901407 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.908427 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c21863c-592f-436a-8fe2-06b0f78b7755-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.924729 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:58 crc kubenswrapper[4959]: I0128 15:35:58.933522 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6b6w\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-kube-api-access-f6b6w\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.244457 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-c8wlm"] Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.246081 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.251747 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.251987 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.252220 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.259800 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-c8wlm"] Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.413172 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-swiftconf\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.413292 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-scripts\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.413342 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-ring-data-devices\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.413593 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b009e707-15ca-458c-ab37-d3cab102e497-etc-swift\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.413852 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-combined-ca-bundle\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.413965 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zxq5\" (UniqueName: \"kubernetes.io/projected/b009e707-15ca-458c-ab37-d3cab102e497-kube-api-access-6zxq5\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.414030 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-dispersionconf\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 
15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.414196 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:35:59 crc kubenswrapper[4959]: E0128 15:35:59.414427 4959 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 15:35:59 crc kubenswrapper[4959]: E0128 15:35:59.414479 4959 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 15:35:59 crc kubenswrapper[4959]: E0128 15:35:59.414581 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift podName:2c21863c-592f-436a-8fe2-06b0f78b7755 nodeName:}" failed. No retries permitted until 2026-01-28 15:36:00.414535493 +0000 UTC m=+1143.860441876 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift") pod "swift-storage-0" (UID: "2c21863c-592f-436a-8fe2-06b0f78b7755") : configmap "swift-ring-files" not found Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.516493 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-combined-ca-bundle\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.516596 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zxq5\" (UniqueName: \"kubernetes.io/projected/b009e707-15ca-458c-ab37-d3cab102e497-kube-api-access-6zxq5\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.516680 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-dispersionconf\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.516775 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-swiftconf\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.516806 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-scripts\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.516865 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-ring-data-devices\") pod 
\"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.516902 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b009e707-15ca-458c-ab37-d3cab102e497-etc-swift\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.517527 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b009e707-15ca-458c-ab37-d3cab102e497-etc-swift\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.519040 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-ring-data-devices\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.519065 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-scripts\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.524411 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-dispersionconf\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.526947 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-swiftconf\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.538706 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-combined-ca-bundle\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.541267 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zxq5\" (UniqueName: \"kubernetes.io/projected/b009e707-15ca-458c-ab37-d3cab102e497-kube-api-access-6zxq5\") pod \"swift-ring-rebalance-c8wlm\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:35:59 crc kubenswrapper[4959]: I0128 15:35:59.572583 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:36:00 crc kubenswrapper[4959]: I0128 15:36:00.442387 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:36:00 crc kubenswrapper[4959]: E0128 15:36:00.442646 4959 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 15:36:00 crc kubenswrapper[4959]: E0128 15:36:00.443052 4959 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 15:36:00 crc kubenswrapper[4959]: E0128 15:36:00.443135 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift podName:2c21863c-592f-436a-8fe2-06b0f78b7755 nodeName:}" failed. No retries permitted until 2026-01-28 15:36:02.443098491 +0000 UTC m=+1145.889004874 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift") pod "swift-storage-0" (UID: "2c21863c-592f-436a-8fe2-06b0f78b7755") : configmap "swift-ring-files" not found Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.483904 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:36:02 crc kubenswrapper[4959]: E0128 15:36:02.484197 4959 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 15:36:02 crc kubenswrapper[4959]: E0128 15:36:02.484237 4959 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 15:36:02 crc kubenswrapper[4959]: E0128 15:36:02.484324 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift podName:2c21863c-592f-436a-8fe2-06b0f78b7755 nodeName:}" failed. No retries permitted until 2026-01-28 15:36:06.484299246 +0000 UTC m=+1149.930205629 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift") pod "swift-storage-0" (UID: "2c21863c-592f-436a-8fe2-06b0f78b7755") : configmap "swift-ring-files" not found Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.656344 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.688396 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jb9tj\" (UniqueName: \"kubernetes.io/projected/51911834-44ad-4f5b-978b-0fc2099b753d-kube-api-access-jb9tj\") pod \"51911834-44ad-4f5b-978b-0fc2099b753d\" (UID: \"51911834-44ad-4f5b-978b-0fc2099b753d\") " Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.688475 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51911834-44ad-4f5b-978b-0fc2099b753d-config\") pod \"51911834-44ad-4f5b-978b-0fc2099b753d\" (UID: \"51911834-44ad-4f5b-978b-0fc2099b753d\") " Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.689580 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51911834-44ad-4f5b-978b-0fc2099b753d-config" (OuterVolumeSpecName: "config") pod "51911834-44ad-4f5b-978b-0fc2099b753d" (UID: "51911834-44ad-4f5b-978b-0fc2099b753d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.697931 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51911834-44ad-4f5b-978b-0fc2099b753d-kube-api-access-jb9tj" (OuterVolumeSpecName: "kube-api-access-jb9tj") pod "51911834-44ad-4f5b-978b-0fc2099b753d" (UID: "51911834-44ad-4f5b-978b-0fc2099b753d"). InnerVolumeSpecName "kube-api-access-jb9tj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.790569 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jb9tj\" (UniqueName: \"kubernetes.io/projected/51911834-44ad-4f5b-978b-0fc2099b753d-kube-api-access-jb9tj\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.790952 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51911834-44ad-4f5b-978b-0fc2099b753d-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.876689 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" event={"ID":"51911834-44ad-4f5b-978b-0fc2099b753d","Type":"ContainerDied","Data":"57a5076634da648d72b3837bd444e07f36becc3f8d7c8a9ad90eb60d9cfbdf9b"} Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.876843 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lf926" Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.942401 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lf926"] Jan 28 15:36:02 crc kubenswrapper[4959]: I0128 15:36:02.948127 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lf926"] Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.220309 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.417526 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-config\") pod \"b942b031-74b0-4bf7-8dae-75362fa7e393\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.417663 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgts9\" (UniqueName: \"kubernetes.io/projected/b942b031-74b0-4bf7-8dae-75362fa7e393-kube-api-access-rgts9\") pod \"b942b031-74b0-4bf7-8dae-75362fa7e393\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.417755 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-dns-svc\") pod \"b942b031-74b0-4bf7-8dae-75362fa7e393\" (UID: \"b942b031-74b0-4bf7-8dae-75362fa7e393\") " Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.418398 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-config" (OuterVolumeSpecName: "config") pod "b942b031-74b0-4bf7-8dae-75362fa7e393" (UID: "b942b031-74b0-4bf7-8dae-75362fa7e393"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.418523 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b942b031-74b0-4bf7-8dae-75362fa7e393" (UID: "b942b031-74b0-4bf7-8dae-75362fa7e393"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.425979 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b942b031-74b0-4bf7-8dae-75362fa7e393-kube-api-access-rgts9" (OuterVolumeSpecName: "kube-api-access-rgts9") pod "b942b031-74b0-4bf7-8dae-75362fa7e393" (UID: "b942b031-74b0-4bf7-8dae-75362fa7e393"). InnerVolumeSpecName "kube-api-access-rgts9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.519952 4959 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.519988 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b942b031-74b0-4bf7-8dae-75362fa7e393-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.520001 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgts9\" (UniqueName: \"kubernetes.io/projected/b942b031-74b0-4bf7-8dae-75362fa7e393-kube-api-access-rgts9\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.598715 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51911834-44ad-4f5b-978b-0fc2099b753d" path="/var/lib/kubelet/pods/51911834-44ad-4f5b-978b-0fc2099b753d/volumes" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.905722 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" event={"ID":"b942b031-74b0-4bf7-8dae-75362fa7e393","Type":"ContainerDied","Data":"edba64655d165ac8b69502a13ca2aada7efaa5f54dbe97ad20ba587dc2a0a0ac"} Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.905855 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-w7lfc" Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.957937 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-w7lfc"] Jan 28 15:36:04 crc kubenswrapper[4959]: I0128 15:36:04.966227 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-w7lfc"] Jan 28 15:36:06 crc kubenswrapper[4959]: E0128 15:36:06.375384 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified" Jan 28 15:36:06 crc kubenswrapper[4959]: E0128 15:36:06.376064 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:ovsdb-server-init,Image:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n568h5b8h695h5bdh5b4hf9h568h585hfdh687h557h675h67hc5h58ch675h556h5c6h565hddh79h88hbbh5fch5dbh576hbch5cdh55h548h5b5h645q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l6fh8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-zf2dp_openstack(8171da25-3ff2-431c-a3a6-482426500111): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 15:36:06 crc kubenswrapper[4959]: E0128 15:36:06.377574 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ovs-zf2dp" podUID="8171da25-3ff2-431c-a3a6-482426500111" Jan 28 15:36:06 crc kubenswrapper[4959]: I0128 15:36:06.570045 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:36:06 crc kubenswrapper[4959]: E0128 15:36:06.570358 4959 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 15:36:06 crc kubenswrapper[4959]: E0128 15:36:06.570395 4959 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 15:36:06 crc kubenswrapper[4959]: E0128 15:36:06.570477 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift 
podName:2c21863c-592f-436a-8fe2-06b0f78b7755 nodeName:}" failed. No retries permitted until 2026-01-28 15:36:14.570454318 +0000 UTC m=+1158.016360701 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift") pod "swift-storage-0" (UID: "2c21863c-592f-436a-8fe2-06b0f78b7755") : configmap "swift-ring-files" not found Jan 28 15:36:06 crc kubenswrapper[4959]: I0128 15:36:06.600560 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b942b031-74b0-4bf7-8dae-75362fa7e393" path="/var/lib/kubelet/pods/b942b031-74b0-4bf7-8dae-75362fa7e393/volumes" Jan 28 15:36:06 crc kubenswrapper[4959]: E0128 15:36:06.925643 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\\\"\"" pod="openstack/ovn-controller-ovs-zf2dp" podUID="8171da25-3ff2-431c-a3a6-482426500111" Jan 28 15:36:08 crc kubenswrapper[4959]: I0128 15:36:08.253151 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-c8wlm"] Jan 28 15:36:08 crc kubenswrapper[4959]: I0128 15:36:08.306906 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-d4zkj"] Jan 28 15:36:14 crc kubenswrapper[4959]: I0128 15:36:14.643434 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:36:14 crc kubenswrapper[4959]: E0128 15:36:14.643909 4959 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 15:36:14 crc kubenswrapper[4959]: E0128 15:36:14.644598 4959 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 15:36:14 crc kubenswrapper[4959]: E0128 15:36:14.644711 4959 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift podName:2c21863c-592f-436a-8fe2-06b0f78b7755 nodeName:}" failed. No retries permitted until 2026-01-28 15:36:30.644678681 +0000 UTC m=+1174.090585104 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift") pod "swift-storage-0" (UID: "2c21863c-592f-436a-8fe2-06b0f78b7755") : configmap "swift-ring-files" not found Jan 28 15:36:14 crc kubenswrapper[4959]: I0128 15:36:14.996575 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-c8wlm" event={"ID":"b009e707-15ca-458c-ab37-d3cab102e497","Type":"ContainerStarted","Data":"2d7a9b16e7f8546ee020d2f569d6d8ad58be76573fb66249dfecfd90900ea07a"} Jan 28 15:36:14 crc kubenswrapper[4959]: I0128 15:36:14.997969 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" event={"ID":"3fa1ab35-959f-4009-beb9-fc4e7dc1b825","Type":"ContainerStarted","Data":"e90d511b762588fe7c525bf470ff757aff076c0c8ea9eea6f7ed71d04a30007f"} Jan 28 15:36:15 crc kubenswrapper[4959]: E0128 15:36:15.354303 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Jan 28 15:36:15 crc kubenswrapper[4959]: E0128 15:36:15.354399 4959 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Jan 28 15:36:15 crc kubenswrapper[4959]: E0128 15:36:15.354619 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wnt92,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(4c05f0db-12b1-4491-8680-2df359888603): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" logger="UnhandledError" Jan 28 15:36:15 crc kubenswrapper[4959]: E0128 15:36:15.355912 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="4c05f0db-12b1-4491-8680-2df359888603" Jan 28 15:36:16 crc kubenswrapper[4959]: E0128 15:36:16.011547 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="4c05f0db-12b1-4491-8680-2df359888603" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.021205 4959 generic.go:334] "Generic (PLEG): container finished" podID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" containerID="57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3" exitCode=0 Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.021285 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" event={"ID":"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae","Type":"ContainerDied","Data":"57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3"} Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.025867 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"3e4ce542-8a53-4fea-b644-4ccb423a313b","Type":"ContainerStarted","Data":"4a939dab211e72cb4a104a9bd60a1efa828a248b15fb072a2bb1999a21209a9a"} Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.029308 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"72dccd04-5057-4f99-942e-750ca1f7b3b5","Type":"ContainerStarted","Data":"c8f1c12ea2ea63a68e56464a0739cb521a02f90812a0a60ccdb2045357934b8b"} Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.029340 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"72dccd04-5057-4f99-942e-750ca1f7b3b5","Type":"ContainerStarted","Data":"8cc929581ec226f78646aa47ea92c96a07f26073401c5fc43016401e9bac6cd2"} Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.031618 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-bp544" 
event={"ID":"93fc210e-4599-4436-b8e6-a20a8c5cd2b4","Type":"ContainerStarted","Data":"079c051f2e1eaf2ef584f7560960e8eaef38cc706718de3c02a7cf79ca1a241a"} Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.031694 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-bp544" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.034096 4959 generic.go:334] "Generic (PLEG): container finished" podID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" containerID="ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa" exitCode=0 Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.034174 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" event={"ID":"3fa1ab35-959f-4009-beb9-fc4e7dc1b825","Type":"ContainerDied","Data":"ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa"} Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.038487 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-24g5r" event={"ID":"f9cf2a2e-c773-4fcc-86ea-01ed47c305cb","Type":"ContainerStarted","Data":"52696f7b52fcedbee271406a822b7539d3e99f45fc2b0fc0920c1ac3c44a35ff"} Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.080735 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=40.736359141 podStartE2EDuration="1m8.080701414s" podCreationTimestamp="2026-01-28 15:35:09 +0000 UTC" firstStartedPulling="2026-01-28 15:35:48.390993778 +0000 UTC m=+1131.836900161" lastFinishedPulling="2026-01-28 15:36:15.735336051 +0000 UTC m=+1159.181242434" observedRunningTime="2026-01-28 15:36:17.071583599 +0000 UTC m=+1160.517490002" watchObservedRunningTime="2026-01-28 15:36:17.080701414 +0000 UTC m=+1160.526607797" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.111729 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-bp544" podStartSLOduration=37.805769511 podStartE2EDuration="1m5.111699548s" podCreationTimestamp="2026-01-28 15:35:12 +0000 UTC" firstStartedPulling="2026-01-28 15:35:48.417283226 +0000 UTC m=+1131.863189609" lastFinishedPulling="2026-01-28 15:36:15.723213263 +0000 UTC m=+1159.169119646" observedRunningTime="2026-01-28 15:36:17.11013496 +0000 UTC m=+1160.556041363" watchObservedRunningTime="2026-01-28 15:36:17.111699548 +0000 UTC m=+1160.557605951" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.145735 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-24g5r" podStartSLOduration=15.793621994 podStartE2EDuration="43.145703708s" podCreationTimestamp="2026-01-28 15:35:34 +0000 UTC" firstStartedPulling="2026-01-28 15:35:48.414323873 +0000 UTC m=+1131.860230256" lastFinishedPulling="2026-01-28 15:36:15.766405587 +0000 UTC m=+1159.212311970" observedRunningTime="2026-01-28 15:36:17.141046362 +0000 UTC m=+1160.586952765" watchObservedRunningTime="2026-01-28 15:36:17.145703708 +0000 UTC m=+1160.591610111" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.542534 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-mx5pt"] Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.599052 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-nlfvk"] Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.607304 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.616688 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-nlfvk"] Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.618229 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.721964 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.722068 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.722093 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6mk4\" (UniqueName: \"kubernetes.io/projected/578a4154-8845-46b5-885d-57c3dd0a22b0-kube-api-access-x6mk4\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.722216 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-config\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.723008 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-d4zkj"] Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.755925 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-4spbq"] Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.757863 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.762432 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.777963 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-4spbq"]
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823552 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823605 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823625 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823669 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823690 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6mk4\" (UniqueName: \"kubernetes.io/projected/578a4154-8845-46b5-885d-57c3dd0a22b0-kube-api-access-x6mk4\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823712 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-config\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823754 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-config\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823798 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnbck\" (UniqueName: \"kubernetes.io/projected/2dd19028-8bc9-48e7-a361-8581aa9d5d29-kube-api-access-wnbck\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
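Each VerifyControllerAttachedVolume/MountVolume pair above maps to one volume in the dnsmasq pod spec: plain ConfigMap volumes (plugin kubernetes.io/configmap) plus one projected service-account token volume (kubernetes.io/projected). A hedged reconstruction with the upstream k8s.io/api types, using the volume names logged for dnsmasq-dns-698758b865-4spbq; the referenced ConfigMap object names are guesses, not taken from the log:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func volumes() []corev1.Volume {
	cm := func(volName, cmName string) corev1.Volume {
		return corev1.Volume{
			Name: volName,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: cmName},
				},
			},
		}
	}
	return []corev1.Volume{
		cm("config", "dnsmasq-dns"),            // ConfigMap name is a guess
		cm("dns-svc", "dns-svc"),               // guess
		cm("ovsdbserver-nb", "ovsdbserver-nb"), // matches the reflector cache entries above
		cm("ovsdbserver-sb", "ovsdbserver-sb"), // likewise
		{
			// kube-api-access-* volumes are projected service-account tokens.
			Name: "kube-api-access-wnbck",
			VolumeSource: corev1.VolumeSource{
				Projected: &corev1.ProjectedVolumeSource{
					Sources: []corev1.VolumeProjection{{
						ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token"},
					}},
				},
			},
		},
	}
}

func main() { fmt.Println(len(volumes()), "volumes, as in the reconciler entries above") }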
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.823823 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-dns-svc\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.825143 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-config\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.825151 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.825768 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-dns-svc\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.872118 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6mk4\" (UniqueName: \"kubernetes.io/projected/578a4154-8845-46b5-885d-57c3dd0a22b0-kube-api-access-x6mk4\") pod \"dnsmasq-dns-74f6f696b9-nlfvk\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.925457 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-config\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.925540 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnbck\" (UniqueName: \"kubernetes.io/projected/2dd19028-8bc9-48e7-a361-8581aa9d5d29-kube-api-access-wnbck\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.925571 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-dns-svc\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.925673 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.925699 4959 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.926703 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.927301 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-dns-svc\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.927815 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-config\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.928707 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.940075 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:17 crc kubenswrapper[4959]: I0128 15:36:17.955134 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnbck\" (UniqueName: \"kubernetes.io/projected/2dd19028-8bc9-48e7-a361-8581aa9d5d29-kube-api-access-wnbck\") pod \"dnsmasq-dns-698758b865-4spbq\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") " pod="openstack/dnsmasq-dns-698758b865-4spbq" Jan 28 15:36:18 crc kubenswrapper[4959]: I0128 15:36:18.050653 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ee1fc53a-3817-4c94-8bd6-569c089c02cb","Type":"ContainerStarted","Data":"95fde7fc0919101006d40aab1d39f00eac950d7d8fdabc9d9734b0f2c8980756"} Jan 28 15:36:18 crc kubenswrapper[4959]: I0128 15:36:18.055446 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a81258f3-e48f-44f0-93d9-02e58302683a","Type":"ContainerStarted","Data":"bdb6462959cea0d57d3c002a05a390ad101555be0a54d21096014e3c5c1c29cc"} Jan 28 15:36:18 crc kubenswrapper[4959]: I0128 15:36:18.129717 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-4spbq" Jan 28 15:36:19 crc kubenswrapper[4959]: I0128 15:36:19.066332 4959 generic.go:334] "Generic (PLEG): container finished" podID="d7f92998-4485-45fa-b5c5-cbb5211799c6" containerID="4986b31572618b617829532599629630e2176f788ac4bdf728ea70b398edb985" exitCode=0 Jan 28 15:36:19 crc kubenswrapper[4959]: I0128 15:36:19.066447 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d7f92998-4485-45fa-b5c5-cbb5211799c6","Type":"ContainerDied","Data":"4986b31572618b617829532599629630e2176f788ac4bdf728ea70b398edb985"} Jan 28 15:36:19 crc kubenswrapper[4959]: I0128 15:36:19.079271 4959 generic.go:334] "Generic (PLEG): container finished" podID="54187dbe-318c-4070-9771-d3d98fc10457" containerID="868d42fcbd9b9c55b3ed9428af6f061065dcc539d4d730024cd849c72b93fc14" exitCode=0 Jan 28 15:36:19 crc kubenswrapper[4959]: I0128 15:36:19.079332 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"54187dbe-318c-4070-9771-d3d98fc10457","Type":"ContainerDied","Data":"868d42fcbd9b9c55b3ed9428af6f061065dcc539d4d730024cd849c72b93fc14"} Jan 28 15:36:19 crc kubenswrapper[4959]: I0128 15:36:19.893379 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 28 15:36:20 crc kubenswrapper[4959]: I0128 15:36:20.006875 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 28 15:36:20 crc kubenswrapper[4959]: I0128 15:36:20.096903 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"3e4ce542-8a53-4fea-b644-4ccb423a313b","Type":"ContainerStarted","Data":"7676f57a83e26f63f4cf3a71e74a151db6d05cb9288e76431c09c6f51001aedf"} Jan 28 15:36:20 crc kubenswrapper[4959]: I0128 15:36:20.100858 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" event={"ID":"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae","Type":"ContainerStarted","Data":"2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b"} Jan 28 15:36:20 crc kubenswrapper[4959]: I0128 15:36:20.101098 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 28 15:36:20 crc kubenswrapper[4959]: I0128 15:36:20.242712 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-4spbq"] Jan 28 15:36:20 crc kubenswrapper[4959]: W0128 15:36:20.249568 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod578a4154_8845_46b5_885d_57c3dd0a22b0.slice/crio-cd39a6895b2d19b4293f31010b85a5d635321340972e7bbf6c2bbd162da3e4af WatchSource:0}: Error finding container cd39a6895b2d19b4293f31010b85a5d635321340972e7bbf6c2bbd162da3e4af: Status 404 returned error can't find the container with id cd39a6895b2d19b4293f31010b85a5d635321340972e7bbf6c2bbd162da3e4af Jan 28 15:36:20 crc kubenswrapper[4959]: I0128 15:36:20.251618 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-nlfvk"] Jan 28 15:36:20 crc kubenswrapper[4959]: W0128 15:36:20.266606 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2dd19028_8bc9_48e7_a361_8581aa9d5d29.slice/crio-1ee7d042fe883cf5c7396756fcd1255a7be2a6c06a00b77493b1c4f840dfc985 WatchSource:0}: Error finding container 
1ee7d042fe883cf5c7396756fcd1255a7be2a6c06a00b77493b1c4f840dfc985: Status 404 returned error can't find the container with id 1ee7d042fe883cf5c7396756fcd1255a7be2a6c06a00b77493b1c4f840dfc985 Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.110173 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-c8wlm" event={"ID":"b009e707-15ca-458c-ab37-d3cab102e497","Type":"ContainerStarted","Data":"46775a6829f3798e82b6a16491062df775f07a96e8becbb731e81f351b330cd7"} Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.112225 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"54187dbe-318c-4070-9771-d3d98fc10457","Type":"ContainerStarted","Data":"a2219c3938a39c172408e3e618891fc209d73f67b9a32c3214e4463c85aa8fbc"} Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.115243 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" event={"ID":"3fa1ab35-959f-4009-beb9-fc4e7dc1b825","Type":"ContainerStarted","Data":"b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1"} Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.115319 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" podUID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" containerName="dnsmasq-dns" containerID="cri-o://b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1" gracePeriod=10 Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.115384 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.120937 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"d7f92998-4485-45fa-b5c5-cbb5211799c6","Type":"ContainerStarted","Data":"d339a1361c39ed9ad4660504e6c75b3d6bb22c069baef6d9eb22c84afa4113f4"} Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.122832 4959 generic.go:334] "Generic (PLEG): container finished" podID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerID="91cea2f5be084bdbff9356a0b39ef083492cf52c78f8d1c7db7232bd9850bfde" exitCode=0 Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.122924 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-4spbq" event={"ID":"2dd19028-8bc9-48e7-a361-8581aa9d5d29","Type":"ContainerDied","Data":"91cea2f5be084bdbff9356a0b39ef083492cf52c78f8d1c7db7232bd9850bfde"} Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.123179 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-4spbq" event={"ID":"2dd19028-8bc9-48e7-a361-8581aa9d5d29","Type":"ContainerStarted","Data":"1ee7d042fe883cf5c7396756fcd1255a7be2a6c06a00b77493b1c4f840dfc985"} Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.124942 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zf2dp" event={"ID":"8171da25-3ff2-431c-a3a6-482426500111","Type":"ContainerStarted","Data":"7dcc6150f9852620755e883307f96c090172cecb9eeb3732af1efe3c7df644d5"} Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.126715 4959 generic.go:334] "Generic (PLEG): container finished" podID="578a4154-8845-46b5-885d-57c3dd0a22b0" containerID="eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4" exitCode=0 Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.127196 4959 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" containerName="dnsmasq-dns" containerID="cri-o://2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b" gracePeriod=10
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.127793 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" event={"ID":"578a4154-8845-46b5-885d-57c3dd0a22b0","Type":"ContainerDied","Data":"eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4"}
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.128071 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt"
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.128145 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" event={"ID":"578a4154-8845-46b5-885d-57c3dd0a22b0","Type":"ContainerStarted","Data":"cd39a6895b2d19b4293f31010b85a5d635321340972e7bbf6c2bbd162da3e4af"}
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.146370 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-c8wlm" podStartSLOduration=16.609597748 podStartE2EDuration="22.146346138s" podCreationTimestamp="2026-01-28 15:35:59 +0000 UTC" firstStartedPulling="2026-01-28 15:36:14.246715896 +0000 UTC m=+1157.692622289" lastFinishedPulling="2026-01-28 15:36:19.783464296 +0000 UTC m=+1163.229370679" observedRunningTime="2026-01-28 15:36:21.136019314 +0000 UTC m=+1164.581925697" watchObservedRunningTime="2026-01-28 15:36:21.146346138 +0000 UTC m=+1164.592252521"
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.171752 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371957.683044 podStartE2EDuration="1m19.171731672s" podCreationTimestamp="2026-01-28 15:35:02 +0000 UTC" firstStartedPulling="2026-01-28 15:35:04.903230851 +0000 UTC m=+1088.349137234" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:21.167814686 +0000 UTC m=+1164.613721069" watchObservedRunningTime="2026-01-28 15:36:21.171731672 +0000 UTC m=+1164.617638055"
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.182954 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.211083 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=40.89197232 podStartE2EDuration="1m8.21106364s" podCreationTimestamp="2026-01-28 15:35:13 +0000 UTC" firstStartedPulling="2026-01-28 15:35:48.413662387 +0000 UTC m=+1131.859568770" lastFinishedPulling="2026-01-28 15:36:15.732753687 +0000 UTC m=+1159.178660090" observedRunningTime="2026-01-28 15:36:21.192312949 +0000 UTC m=+1164.638219352" watchObservedRunningTime="2026-01-28 15:36:21.21106364 +0000 UTC m=+1164.656970013"
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.236386 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" podStartSLOduration=6.918374065 podStartE2EDuration="1m21.236355002s" podCreationTimestamp="2026-01-28 15:35:00 +0000 UTC" firstStartedPulling="2026-01-28 15:35:01.417484197 +0000 UTC m=+1084.863390580" lastFinishedPulling="2026-01-28 15:36:15.735465134 +0000 UTC m=+1159.181371517" observedRunningTime="2026-01-28 15:36:21.226312355 +0000 UTC m=+1164.672218738" watchObservedRunningTime="2026-01-28 15:36:21.236355002 +0000 UTC m=+1164.682261385"
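The openstack-galera-0 entry above is the odd one out: lastFinishedPulling is the zero time (0001-01-01), so the pull-window subtraction saturates Go's time.Time.Sub at math.MinInt64 nanoseconds, and subtracting that saturated value from the 1m19.171731672s E2E duration wraps int64 arithmetic to exactly the logged podStartSLOduration=-9223371957.683044. A small demonstration of the wrap, using the timestamps from the entry (this explains the number; it is not the tracker's code):

package main

import (
	"fmt"
	"time"
)

func main() {
	firstPull, _ := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST",
		"2026-01-28 15:35:04.903230851 +0000 UTC") // firstStartedPulling
	var lastPull time.Time // zero value: 0001-01-01 00:00:00 +0000 UTC, as logged

	e2e := 79171731672 * time.Nanosecond // podStartE2EDuration="1m19.171731672s"
	pull := lastPull.Sub(firstPull)      // out of range, so Sub saturates at math.MinInt64 ns
	fmt.Println(pull)                    // -2562047h47m16.854775808s
	fmt.Println((e2e - pull).Seconds())  // int64 wraparound: -9.223371957683044e+09
}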
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.253070 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" podStartSLOduration=22.725122978 podStartE2EDuration="24.253044863s" podCreationTimestamp="2026-01-28 15:35:57 +0000 UTC" firstStartedPulling="2026-01-28 15:36:14.25417687 +0000 UTC m=+1157.700083253" lastFinishedPulling="2026-01-28 15:36:15.782098745 +0000 UTC m=+1159.228005138" observedRunningTime="2026-01-28 15:36:21.246864101 +0000 UTC m=+1164.692770484" watchObservedRunningTime="2026-01-28 15:36:21.253044863 +0000 UTC m=+1164.698951246"
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.294458 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=37.7958075 podStartE2EDuration="1m18.294423561s" podCreationTimestamp="2026-01-28 15:35:03 +0000 UTC" firstStartedPulling="2026-01-28 15:35:05.663036741 +0000 UTC m=+1089.108943124" lastFinishedPulling="2026-01-28 15:35:46.161652792 +0000 UTC m=+1129.607559185" observedRunningTime="2026-01-28 15:36:21.278181871 +0000 UTC m=+1164.724088254" watchObservedRunningTime="2026-01-28 15:36:21.294423561 +0000 UTC m=+1164.740329944"
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.930392 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj"
Jan 28 15:36:21 crc kubenswrapper[4959]: I0128 15:36:21.936505 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.035985 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwlv2\" (UniqueName: \"kubernetes.io/projected/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-kube-api-access-vwlv2\") pod \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") "
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.036088 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-dns-svc\") pod \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") "
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.036281 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-config\") pod \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") "
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.036313 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-config\") pod \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\" (UID: \"3fa1ab35-959f-4009-beb9-fc4e7dc1b825\") "
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.036340 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcpj7\" (UniqueName: \"kubernetes.io/projected/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-kube-api-access-tcpj7\") pod \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") "
Jan 28 15:36:22 crc 
kubenswrapper[4959]: I0128 15:36:22.036432 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-dns-svc\") pod \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\" (UID: \"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae\") " Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.046375 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-kube-api-access-tcpj7" (OuterVolumeSpecName: "kube-api-access-tcpj7") pod "e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" (UID: "e1851d07-f0ce-46bf-88cb-8a4cc94e04ae"). InnerVolumeSpecName "kube-api-access-tcpj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.048980 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-kube-api-access-vwlv2" (OuterVolumeSpecName: "kube-api-access-vwlv2") pod "3fa1ab35-959f-4009-beb9-fc4e7dc1b825" (UID: "3fa1ab35-959f-4009-beb9-fc4e7dc1b825"). InnerVolumeSpecName "kube-api-access-vwlv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.079599 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-config" (OuterVolumeSpecName: "config") pod "e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" (UID: "e1851d07-f0ce-46bf-88cb-8a4cc94e04ae"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.079614 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-config" (OuterVolumeSpecName: "config") pod "3fa1ab35-959f-4009-beb9-fc4e7dc1b825" (UID: "3fa1ab35-959f-4009-beb9-fc4e7dc1b825"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.100862 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3fa1ab35-959f-4009-beb9-fc4e7dc1b825" (UID: "3fa1ab35-959f-4009-beb9-fc4e7dc1b825"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.118037 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" (UID: "e1851d07-f0ce-46bf-88cb-8a4cc94e04ae"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.138573 4959 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.138657 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwlv2\" (UniqueName: \"kubernetes.io/projected/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-kube-api-access-vwlv2\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.138674 4959 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.138687 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.138704 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3fa1ab35-959f-4009-beb9-fc4e7dc1b825-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.138720 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcpj7\" (UniqueName: \"kubernetes.io/projected/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae-kube-api-access-tcpj7\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.140927 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" event={"ID":"578a4154-8845-46b5-885d-57c3dd0a22b0","Type":"ContainerStarted","Data":"75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4"} Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.141218 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.152961 4959 generic.go:334] "Generic (PLEG): container finished" podID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" containerID="b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1" exitCode=0 Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.153054 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" event={"ID":"3fa1ab35-959f-4009-beb9-fc4e7dc1b825","Type":"ContainerDied","Data":"b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1"} Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.153097 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj" event={"ID":"3fa1ab35-959f-4009-beb9-fc4e7dc1b825","Type":"ContainerDied","Data":"e90d511b762588fe7c525bf470ff757aff076c0c8ea9eea6f7ed71d04a30007f"} Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.153148 4959 scope.go:117] "RemoveContainer" containerID="b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.153368 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-d4zkj"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.158072 4959 generic.go:334] "Generic (PLEG): container finished" podID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" containerID="2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b" exitCode=0
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.158151 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" event={"ID":"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae","Type":"ContainerDied","Data":"2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b"}
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.158181 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt" event={"ID":"e1851d07-f0ce-46bf-88cb-8a4cc94e04ae","Type":"ContainerDied","Data":"a8725f279d610e155bc12bbd225a0af5075591745f7da01f5319a2ec0875c981"}
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.158238 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-mx5pt"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.161413 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-4spbq" event={"ID":"2dd19028-8bc9-48e7-a361-8581aa9d5d29","Type":"ContainerStarted","Data":"d2b39d4939b9560c4a01929d05a4f115e04f5c8d50579c775191e4acda8eb8d2"}
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.161714 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.165165 4959 generic.go:334] "Generic (PLEG): container finished" podID="8171da25-3ff2-431c-a3a6-482426500111" containerID="7dcc6150f9852620755e883307f96c090172cecb9eeb3732af1efe3c7df644d5" exitCode=0
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.165231 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zf2dp" event={"ID":"8171da25-3ff2-431c-a3a6-482426500111","Type":"ContainerDied","Data":"7dcc6150f9852620755e883307f96c090172cecb9eeb3732af1efe3c7df644d5"}
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.170404 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" podStartSLOduration=5.170355943 podStartE2EDuration="5.170355943s" podCreationTimestamp="2026-01-28 15:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:22.165603917 +0000 UTC m=+1165.611510310" watchObservedRunningTime="2026-01-28 15:36:22.170355943 +0000 UTC m=+1165.616262326"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.184596 4959 scope.go:117] "RemoveContainer" containerID="ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.206766 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-4spbq" podStartSLOduration=5.2067416380000004 podStartE2EDuration="5.206741638s" podCreationTimestamp="2026-01-28 15:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:22.199626483 +0000 UTC m=+1165.645532886" watchObservedRunningTime="2026-01-28 15:36:22.206741638 +0000 UTC m=+1165.652648021"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.215281 4959 scope.go:117] "RemoveContainer" containerID="b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1"
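The scope.go "RemoveContainer" entry above and the E/I pairs that follow show the benign half of container cleanup: the kubelet asks the runtime for the status of a container it is deleting, CRI-O answers with gRPC NotFound because the container is already gone, and the kubelet logs the error and carries on rather than failing the sync. The usual shape of that tolerance, sketched with the real grpc status helpers (the wrapper function itself is illustrative, not kubelet code):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIgnoreNotFound treats NotFound from the runtime as success: by the
// time the kubelet retries a delete, CRI-O may already have removed the
// container, exactly as in the "DeleteContainer returned error" entries below.
func removeIgnoreNotFound(remove func(id string) error, id string) error {
	err := remove(id)
	if status.Code(err) == codes.NotFound {
		return nil // already gone; nothing left to do
	}
	return err
}

func main() {
	gone := func(id string) error {
		return status.Errorf(codes.NotFound, "could not find container %q", id)
	}
	fmt.Println(removeIgnoreNotFound(gone, "b7c5cb7f1153")) // <nil>
}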
Jan 28 15:36:22 crc kubenswrapper[4959]: E0128 15:36:22.216663 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1\": container with ID starting with b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1 not found: ID does not exist" containerID="b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.216711 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1"} err="failed to get container status \"b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1\": rpc error: code = NotFound desc = could not find container \"b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1\": container with ID starting with b7c5cb7f1153033f815ed78a1f4372035515a2d0a7b30ee5eca272ff39c0bab1 not found: ID does not exist"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.216745 4959 scope.go:117] "RemoveContainer" containerID="ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa"
Jan 28 15:36:22 crc kubenswrapper[4959]: E0128 15:36:22.217420 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa\": container with ID starting with ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa not found: ID does not exist" containerID="ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.217470 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa"} err="failed to get container status \"ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa\": rpc error: code = NotFound desc = could not find container \"ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa\": container with ID starting with ba4ef793f3243a23f0f7fea6eb0805eebbef47cb3857e1b08d62f204cdd2e3fa not found: ID does not exist"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.217509 4959 scope.go:117] "RemoveContainer" containerID="2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.237974 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-mx5pt"]
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.245062 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-mx5pt"]
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.270529 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-d4zkj"]
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.279027 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-d4zkj"]
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.280385 4959 scope.go:117] "RemoveContainer" containerID="57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3"
Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.331183 4959 scope.go:117] 
"RemoveContainer" containerID="2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b" Jan 28 15:36:22 crc kubenswrapper[4959]: E0128 15:36:22.338462 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b\": container with ID starting with 2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b not found: ID does not exist" containerID="2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.338565 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b"} err="failed to get container status \"2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b\": rpc error: code = NotFound desc = could not find container \"2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b\": container with ID starting with 2f0763dcc0593f191822acfa97f4712c34dc57e4cb55cb8be2263a8d9444256b not found: ID does not exist" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.338615 4959 scope.go:117] "RemoveContainer" containerID="57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3" Jan 28 15:36:22 crc kubenswrapper[4959]: E0128 15:36:22.343795 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3\": container with ID starting with 57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3 not found: ID does not exist" containerID="57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.343878 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3"} err="failed to get container status \"57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3\": rpc error: code = NotFound desc = could not find container \"57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3\": container with ID starting with 57ddaa3643134cd9ba89c20855af1da23449fadb0118e16acaeb42fd62ea50e3 not found: ID does not exist" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.601918 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" path="/var/lib/kubelet/pods/3fa1ab35-959f-4009-beb9-fc4e7dc1b825/volumes" Jan 28 15:36:22 crc kubenswrapper[4959]: I0128 15:36:22.603247 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" path="/var/lib/kubelet/pods/e1851d07-f0ce-46bf-88cb-8a4cc94e04ae/volumes" Jan 28 15:36:23 crc kubenswrapper[4959]: I0128 15:36:23.183715 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zf2dp" event={"ID":"8171da25-3ff2-431c-a3a6-482426500111","Type":"ContainerStarted","Data":"3d5d50779be602436f34ef40660e5ded80eed9e060b57635088a5a5d7b88f371"} Jan 28 15:36:23 crc kubenswrapper[4959]: I0128 15:36:23.184088 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-zf2dp" event={"ID":"8171da25-3ff2-431c-a3a6-482426500111","Type":"ContainerStarted","Data":"cb56066e064ddc6df340c9080aa73e4c3b2f25bd8db58024e7eda9d3a7793807"} Jan 28 15:36:23 crc kubenswrapper[4959]: 
I0128 15:36:23.188763 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:36:23 crc kubenswrapper[4959]: I0128 15:36:23.188800 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-zf2dp" Jan 28 15:36:23 crc kubenswrapper[4959]: I0128 15:36:23.419222 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 28 15:36:23 crc kubenswrapper[4959]: I0128 15:36:23.420389 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.050469 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.107464 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.136766 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-zf2dp" podStartSLOduration=37.064598143 podStartE2EDuration="1m12.136746967s" podCreationTimestamp="2026-01-28 15:35:12 +0000 UTC" firstStartedPulling="2026-01-28 15:35:44.711312672 +0000 UTC m=+1128.157219055" lastFinishedPulling="2026-01-28 15:36:19.783461506 +0000 UTC m=+1163.229367879" observedRunningTime="2026-01-28 15:36:23.222331858 +0000 UTC m=+1166.668238241" watchObservedRunningTime="2026-01-28 15:36:24.136746967 +0000 UTC m=+1167.582653350" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.198934 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.242680 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.475301 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 28 15:36:24 crc kubenswrapper[4959]: E0128 15:36:24.475854 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" containerName="init" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.475881 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" containerName="init" Jan 28 15:36:24 crc kubenswrapper[4959]: E0128 15:36:24.475890 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" containerName="dnsmasq-dns" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.475898 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" containerName="dnsmasq-dns" Jan 28 15:36:24 crc kubenswrapper[4959]: E0128 15:36:24.475932 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" containerName="init" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.475939 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" containerName="init" Jan 28 15:36:24 crc kubenswrapper[4959]: E0128 15:36:24.475957 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" containerName="dnsmasq-dns" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.475963 4959 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" containerName="dnsmasq-dns" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.476156 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1851d07-f0ce-46bf-88cb-8a4cc94e04ae" containerName="dnsmasq-dns" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.476167 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fa1ab35-959f-4009-beb9-fc4e7dc1b825" containerName="dnsmasq-dns" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.477255 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.480136 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.480299 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-rzxkp" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.480302 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.480685 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.482896 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.592558 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-527kg\" (UniqueName: \"kubernetes.io/projected/48522d8d-37f7-4011-b382-85567a833329-kube-api-access-527kg\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.592637 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.592674 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.593073 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48522d8d-37f7-4011-b382-85567a833329-config\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.593243 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/48522d8d-37f7-4011-b382-85567a833329-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.593378 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.593433 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48522d8d-37f7-4011-b382-85567a833329-scripts\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.695525 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48522d8d-37f7-4011-b382-85567a833329-config\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.695600 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/48522d8d-37f7-4011-b382-85567a833329-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.695663 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.695719 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48522d8d-37f7-4011-b382-85567a833329-scripts\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.695753 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-527kg\" (UniqueName: \"kubernetes.io/projected/48522d8d-37f7-4011-b382-85567a833329-kube-api-access-527kg\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.695786 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.695811 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.696363 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/48522d8d-37f7-4011-b382-85567a833329-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.696735 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/48522d8d-37f7-4011-b382-85567a833329-config\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.696785 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48522d8d-37f7-4011-b382-85567a833329-scripts\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.705227 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.705429 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.706434 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/48522d8d-37f7-4011-b382-85567a833329-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.725446 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.725532 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 28 15:36:24 crc kubenswrapper[4959]: I0128 15:36:24.998450 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-527kg\" (UniqueName: \"kubernetes.io/projected/48522d8d-37f7-4011-b382-85567a833329-kube-api-access-527kg\") pod \"ovn-northd-0\" (UID: \"48522d8d-37f7-4011-b382-85567a833329\") " pod="openstack/ovn-northd-0" Jan 28 15:36:25 crc kubenswrapper[4959]: I0128 15:36:25.107954 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0"
Jan 28 15:36:25 crc kubenswrapper[4959]: I0128 15:36:25.644748 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 28 15:36:25 crc kubenswrapper[4959]: I0128 15:36:25.798147 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Jan 28 15:36:25 crc kubenswrapper[4959]: I0128 15:36:25.876254 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Jan 28 15:36:26 crc kubenswrapper[4959]: I0128 15:36:26.222331 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"48522d8d-37f7-4011-b382-85567a833329","Type":"ContainerStarted","Data":"e9316374b4abaf0414e9c4e00277a41e2a72a34607d12846e448d6924a9ffea5"}
Jan 28 15:36:27 crc kubenswrapper[4959]: I0128 15:36:27.451142 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 28 15:36:27 crc kubenswrapper[4959]: I0128 15:36:27.542522 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 28 15:36:27 crc kubenswrapper[4959]: I0128 15:36:27.943658 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk"
Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.131389 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.189099 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-nlfvk"]
Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.245729 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"48522d8d-37f7-4011-b382-85567a833329","Type":"ContainerStarted","Data":"a9a3084c0d1b320a46fcf9894dcbe17703ceb1b5b3db0c7e3683ac7a1a35c98e"}
Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.245811 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"48522d8d-37f7-4011-b382-85567a833329","Type":"ContainerStarted","Data":"de3af53de2a425cc24bb909e67f240b9360b94764cf4b58ebe2fb0bca954f163"}
Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.245883 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" podUID="578a4154-8845-46b5-885d-57c3dd0a22b0" containerName="dnsmasq-dns" containerID="cri-o://75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4" gracePeriod=10
Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.246737 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.285433 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.565277171 podStartE2EDuration="4.285407314s" podCreationTimestamp="2026-01-28 15:36:24 +0000 UTC" firstStartedPulling="2026-01-28 15:36:25.65038826 +0000 UTC m=+1169.096294643" lastFinishedPulling="2026-01-28 15:36:27.370518383 +0000 UTC m=+1170.816424786" observedRunningTime="2026-01-28 15:36:28.283044646 +0000 UTC m=+1171.728951029" watchObservedRunningTime="2026-01-28 15:36:28.285407314 +0000 UTC m=+1171.731313697"
Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.689556 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
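The patch_prober/prober pair here records a liveness failure for machine-config-daemon-r75mw: an HTTP GET to 127.0.0.1:8798/health was refused, meaning nothing was listening at probe time; once a probe's failure threshold is exceeded, the kubelet restarts the container. A hedged reconstruction of the probe with the upstream k8s.io/api types (host, path, and port come from the probe output; the period and threshold are placeholders, not the operator's values):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func liveness() *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1",          // from the probe output above
				Path: "/health",            // from the probe output above
				Port: intstr.FromInt(8798), // from the probe output above
			},
		},
		PeriodSeconds:    10, // assumed, not from the log
		FailureThreshold: 3,  // assumed, not from the log
	}
}

func main() { fmt.Println(liveness().HTTPGet.Port.IntValue()) }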
interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.690191 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.785948 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.899843 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-config\") pod \"578a4154-8845-46b5-885d-57c3dd0a22b0\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.900075 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6mk4\" (UniqueName: \"kubernetes.io/projected/578a4154-8845-46b5-885d-57c3dd0a22b0-kube-api-access-x6mk4\") pod \"578a4154-8845-46b5-885d-57c3dd0a22b0\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.900204 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-dns-svc\") pod \"578a4154-8845-46b5-885d-57c3dd0a22b0\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.900268 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-ovsdbserver-nb\") pod \"578a4154-8845-46b5-885d-57c3dd0a22b0\" (UID: \"578a4154-8845-46b5-885d-57c3dd0a22b0\") " Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.921314 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/578a4154-8845-46b5-885d-57c3dd0a22b0-kube-api-access-x6mk4" (OuterVolumeSpecName: "kube-api-access-x6mk4") pod "578a4154-8845-46b5-885d-57c3dd0a22b0" (UID: "578a4154-8845-46b5-885d-57c3dd0a22b0"). InnerVolumeSpecName "kube-api-access-x6mk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.978613 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-config" (OuterVolumeSpecName: "config") pod "578a4154-8845-46b5-885d-57c3dd0a22b0" (UID: "578a4154-8845-46b5-885d-57c3dd0a22b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:28 crc kubenswrapper[4959]: I0128 15:36:28.980669 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "578a4154-8845-46b5-885d-57c3dd0a22b0" (UID: "578a4154-8845-46b5-885d-57c3dd0a22b0"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.002537 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6mk4\" (UniqueName: \"kubernetes.io/projected/578a4154-8845-46b5-885d-57c3dd0a22b0-kube-api-access-x6mk4\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.002576 4959 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.002587 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.003486 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "578a4154-8845-46b5-885d-57c3dd0a22b0" (UID: "578a4154-8845-46b5-885d-57c3dd0a22b0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:29 crc kubenswrapper[4959]: E0128 15:36:29.041082 4959 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb009e707_15ca_458c_ab37_d3cab102e497.slice/crio-46775a6829f3798e82b6a16491062df775f07a96e8becbb731e81f351b330cd7.scope\": RecentStats: unable to find data in memory cache]" Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.104323 4959 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/578a4154-8845-46b5-885d-57c3dd0a22b0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.259074 4959 generic.go:334] "Generic (PLEG): container finished" podID="578a4154-8845-46b5-885d-57c3dd0a22b0" containerID="75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4" exitCode=0 Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.259159 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" event={"ID":"578a4154-8845-46b5-885d-57c3dd0a22b0","Type":"ContainerDied","Data":"75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4"} Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.259208 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.259244 4959 scope.go:117] "RemoveContainer" containerID="75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4" Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.259224 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6f696b9-nlfvk" event={"ID":"578a4154-8845-46b5-885d-57c3dd0a22b0","Type":"ContainerDied","Data":"cd39a6895b2d19b4293f31010b85a5d635321340972e7bbf6c2bbd162da3e4af"} Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.262813 4959 generic.go:334] "Generic (PLEG): container finished" podID="b009e707-15ca-458c-ab37-d3cab102e497" containerID="46775a6829f3798e82b6a16491062df775f07a96e8becbb731e81f351b330cd7" exitCode=0 Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.262861 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-c8wlm" event={"ID":"b009e707-15ca-458c-ab37-d3cab102e497","Type":"ContainerDied","Data":"46775a6829f3798e82b6a16491062df775f07a96e8becbb731e81f351b330cd7"} Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.321653 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-nlfvk"] Jan 28 15:36:29 crc kubenswrapper[4959]: I0128 15:36:29.333542 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f6f696b9-nlfvk"] Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.318586 4959 scope.go:117] "RemoveContainer" containerID="eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.350665 4959 scope.go:117] "RemoveContainer" containerID="75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4" Jan 28 15:36:30 crc kubenswrapper[4959]: E0128 15:36:30.354777 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4\": container with ID starting with 75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4 not found: ID does not exist" containerID="75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.354841 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4"} err="failed to get container status \"75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4\": rpc error: code = NotFound desc = could not find container \"75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4\": container with ID starting with 75d684d6edd6e4ee4cac19a670c653e19780d7ad3672cd57483352690da355c4 not found: ID does not exist" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.354878 4959 scope.go:117] "RemoveContainer" containerID="eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4" Jan 28 15:36:30 crc kubenswrapper[4959]: E0128 15:36:30.355622 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4\": container with ID starting with eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4 not found: ID does not exist" containerID="eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4"
Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.355696 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4"} err="failed to get container status \"eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4\": rpc error: code = NotFound desc = could not find container \"eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4\": container with ID starting with eef8707d0171967404db64c896321b87f0727c384513bddaa11104018c69bbb4 not found: ID does not exist" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.600315 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="578a4154-8845-46b5-885d-57c3dd0a22b0" path="/var/lib/kubelet/pods/578a4154-8845-46b5-885d-57c3dd0a22b0/volumes" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.709639 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.742963 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.754046 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2c21863c-592f-436a-8fe2-06b0f78b7755-etc-swift\") pod \"swift-storage-0\" (UID: \"2c21863c-592f-436a-8fe2-06b0f78b7755\") " pod="openstack/swift-storage-0" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.803430 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.844800 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-ring-data-devices\") pod \"b009e707-15ca-458c-ab37-d3cab102e497\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.844908 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-swiftconf\") pod \"b009e707-15ca-458c-ab37-d3cab102e497\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.844949 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-combined-ca-bundle\") pod \"b009e707-15ca-458c-ab37-d3cab102e497\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.845025 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b009e707-15ca-458c-ab37-d3cab102e497-etc-swift\") pod \"b009e707-15ca-458c-ab37-d3cab102e497\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.845055 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zxq5\" (UniqueName: \"kubernetes.io/projected/b009e707-15ca-458c-ab37-d3cab102e497-kube-api-access-6zxq5\") pod \"b009e707-15ca-458c-ab37-d3cab102e497\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.845084 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-dispersionconf\") pod \"b009e707-15ca-458c-ab37-d3cab102e497\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.845202 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-scripts\") pod \"b009e707-15ca-458c-ab37-d3cab102e497\" (UID: \"b009e707-15ca-458c-ab37-d3cab102e497\") " Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.846473 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "b009e707-15ca-458c-ab37-d3cab102e497" (UID: "b009e707-15ca-458c-ab37-d3cab102e497"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.846910 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b009e707-15ca-458c-ab37-d3cab102e497-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "b009e707-15ca-458c-ab37-d3cab102e497" (UID: "b009e707-15ca-458c-ab37-d3cab102e497"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.852777 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b009e707-15ca-458c-ab37-d3cab102e497-kube-api-access-6zxq5" (OuterVolumeSpecName: "kube-api-access-6zxq5") pod "b009e707-15ca-458c-ab37-d3cab102e497" (UID: "b009e707-15ca-458c-ab37-d3cab102e497"). InnerVolumeSpecName "kube-api-access-6zxq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.856190 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "b009e707-15ca-458c-ab37-d3cab102e497" (UID: "b009e707-15ca-458c-ab37-d3cab102e497"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.874979 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-scripts" (OuterVolumeSpecName: "scripts") pod "b009e707-15ca-458c-ab37-d3cab102e497" (UID: "b009e707-15ca-458c-ab37-d3cab102e497"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.880858 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b009e707-15ca-458c-ab37-d3cab102e497" (UID: "b009e707-15ca-458c-ab37-d3cab102e497"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.881611 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "b009e707-15ca-458c-ab37-d3cab102e497" (UID: "b009e707-15ca-458c-ab37-d3cab102e497"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.947691 4959 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.947753 4959 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.947767 4959 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.947786 4959 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b009e707-15ca-458c-ab37-d3cab102e497-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.947805 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zxq5\" (UniqueName: \"kubernetes.io/projected/b009e707-15ca-458c-ab37-d3cab102e497-kube-api-access-6zxq5\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.947824 4959 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b009e707-15ca-458c-ab37-d3cab102e497-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:30 crc kubenswrapper[4959]: I0128 15:36:30.947834 4959 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b009e707-15ca-458c-ab37-d3cab102e497-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:31 crc kubenswrapper[4959]: I0128 15:36:31.292191 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-c8wlm" event={"ID":"b009e707-15ca-458c-ab37-d3cab102e497","Type":"ContainerDied","Data":"2d7a9b16e7f8546ee020d2f569d6d8ad58be76573fb66249dfecfd90900ea07a"} Jan 28 15:36:31 crc kubenswrapper[4959]: I0128 15:36:31.292241 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d7a9b16e7f8546ee020d2f569d6d8ad58be76573fb66249dfecfd90900ea07a" Jan 28 15:36:31 crc kubenswrapper[4959]: I0128 15:36:31.292310 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-c8wlm" Jan 28 15:36:31 crc kubenswrapper[4959]: I0128 15:36:31.632332 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 28 15:36:31 crc kubenswrapper[4959]: W0128 15:36:31.635793 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c21863c_592f_436a_8fe2_06b0f78b7755.slice/crio-2c37128a728e77465726b5b4e18c79c04a9cea715a57ca048ba14fd6e1c06c86 WatchSource:0}: Error finding container 2c37128a728e77465726b5b4e18c79c04a9cea715a57ca048ba14fd6e1c06c86: Status 404 returned error can't find the container with id 2c37128a728e77465726b5b4e18c79c04a9cea715a57ca048ba14fd6e1c06c86 Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.235958 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-kvkkf"] Jan 28 15:36:32 crc kubenswrapper[4959]: E0128 15:36:32.237068 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="578a4154-8845-46b5-885d-57c3dd0a22b0" containerName="init" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.237232 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="578a4154-8845-46b5-885d-57c3dd0a22b0" containerName="init" Jan 28 15:36:32 crc kubenswrapper[4959]: E0128 15:36:32.237307 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="578a4154-8845-46b5-885d-57c3dd0a22b0" containerName="dnsmasq-dns" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.237364 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="578a4154-8845-46b5-885d-57c3dd0a22b0" containerName="dnsmasq-dns" Jan 28 15:36:32 crc kubenswrapper[4959]: E0128 15:36:32.237452 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b009e707-15ca-458c-ab37-d3cab102e497" containerName="swift-ring-rebalance" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.237513 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="b009e707-15ca-458c-ab37-d3cab102e497" containerName="swift-ring-rebalance" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.237733 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="578a4154-8845-46b5-885d-57c3dd0a22b0" containerName="dnsmasq-dns" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.237807 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="b009e707-15ca-458c-ab37-d3cab102e497" containerName="swift-ring-rebalance" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.238535 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.241753 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.244921 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kvkkf"] Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.312668 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"2c37128a728e77465726b5b4e18c79c04a9cea715a57ca048ba14fd6e1c06c86"} Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.314961 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4c05f0db-12b1-4491-8680-2df359888603","Type":"ContainerStarted","Data":"3952e066ff6c7659c274b5de529819ac727e60466f0242aaa55c431f8b532c1c"} Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.315294 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.335848 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.700569019 podStartE2EDuration="1m25.335818305s" podCreationTimestamp="2026-01-28 15:35:07 +0000 UTC" firstStartedPulling="2026-01-28 15:35:09.025487353 +0000 UTC m=+1092.471393736" lastFinishedPulling="2026-01-28 15:36:30.660736639 +0000 UTC m=+1174.106643022" observedRunningTime="2026-01-28 15:36:32.329937279 +0000 UTC m=+1175.775843672" watchObservedRunningTime="2026-01-28 15:36:32.335818305 +0000 UTC m=+1175.781724698" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.379600 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07842d9-130c-4db0-9e4f-92ba86a89482-operator-scripts\") pod \"root-account-create-update-kvkkf\" (UID: \"f07842d9-130c-4db0-9e4f-92ba86a89482\") " pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.380198 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdtg9\" (UniqueName: \"kubernetes.io/projected/f07842d9-130c-4db0-9e4f-92ba86a89482-kube-api-access-fdtg9\") pod \"root-account-create-update-kvkkf\" (UID: \"f07842d9-130c-4db0-9e4f-92ba86a89482\") " pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.481811 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07842d9-130c-4db0-9e4f-92ba86a89482-operator-scripts\") pod \"root-account-create-update-kvkkf\" (UID: \"f07842d9-130c-4db0-9e4f-92ba86a89482\") " pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.482216 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdtg9\" (UniqueName: \"kubernetes.io/projected/f07842d9-130c-4db0-9e4f-92ba86a89482-kube-api-access-fdtg9\") pod \"root-account-create-update-kvkkf\" (UID: \"f07842d9-130c-4db0-9e4f-92ba86a89482\") " pod="openstack/root-account-create-update-kvkkf"
Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.482868 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07842d9-130c-4db0-9e4f-92ba86a89482-operator-scripts\") pod \"root-account-create-update-kvkkf\" (UID: \"f07842d9-130c-4db0-9e4f-92ba86a89482\") " pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.506271 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdtg9\" (UniqueName: \"kubernetes.io/projected/f07842d9-130c-4db0-9e4f-92ba86a89482-kube-api-access-fdtg9\") pod \"root-account-create-update-kvkkf\" (UID: \"f07842d9-130c-4db0-9e4f-92ba86a89482\") " pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:32 crc kubenswrapper[4959]: I0128 15:36:32.557195 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:33 crc kubenswrapper[4959]: I0128 15:36:33.162464 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kvkkf"] Jan 28 15:36:33 crc kubenswrapper[4959]: W0128 15:36:33.175405 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf07842d9_130c_4db0_9e4f_92ba86a89482.slice/crio-0d88426042c67b370b6f9ce9ba46726862f0a69cbd559928fe819dda8ddae06e WatchSource:0}: Error finding container 0d88426042c67b370b6f9ce9ba46726862f0a69cbd559928fe819dda8ddae06e: Status 404 returned error can't find the container with id 0d88426042c67b370b6f9ce9ba46726862f0a69cbd559928fe819dda8ddae06e Jan 28 15:36:33 crc kubenswrapper[4959]: I0128 15:36:33.333273 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"a27227df69ab7c3b110dfb7e48e18217b9e3d2d67b94e913b5e578629b7ee256"} Jan 28 15:36:33 crc kubenswrapper[4959]: I0128 15:36:33.335876 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kvkkf" event={"ID":"f07842d9-130c-4db0-9e4f-92ba86a89482","Type":"ContainerStarted","Data":"0d88426042c67b370b6f9ce9ba46726862f0a69cbd559928fe819dda8ddae06e"} Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.344318 4959 generic.go:334] "Generic (PLEG): container finished" podID="f07842d9-130c-4db0-9e4f-92ba86a89482" containerID="8873f71d1c1deeee4f7bfa8f936db4c0767888a7c270c2755d109434387fdd2c" exitCode=0 Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.344392 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kvkkf" event={"ID":"f07842d9-130c-4db0-9e4f-92ba86a89482","Type":"ContainerDied","Data":"8873f71d1c1deeee4f7bfa8f936db4c0767888a7c270c2755d109434387fdd2c"} Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.349009 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"2b025f660be0b7efde3a0e1ed4a26e5ddd8e0119ba648ca83ac82ec55addbb02"} Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.349080 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"0ffefcb67caec40cd3802425caae7471b1aee5d30a8741060610ca42613281f7"}
Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.349091 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"48ee7435ba6267358e919aaa6419d555e6cd23b2beae57ee652cec6c0bccb588"} Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.729569 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-r26g4"] Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.731397 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.751950 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-r26g4"] Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.831622 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2916a6-1c00-4636-b1a2-ac651646098f-operator-scripts\") pod \"keystone-db-create-r26g4\" (UID: \"0d2916a6-1c00-4636-b1a2-ac651646098f\") " pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.832067 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk6jj\" (UniqueName: \"kubernetes.io/projected/0d2916a6-1c00-4636-b1a2-ac651646098f-kube-api-access-zk6jj\") pod \"keystone-db-create-r26g4\" (UID: \"0d2916a6-1c00-4636-b1a2-ac651646098f\") " pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.840949 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-b8e2-account-create-update-zz4f6"] Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.844683 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.849745 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.852729 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b8e2-account-create-update-zz4f6"] Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.934164 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c24cb3a0-a66a-4419-8427-a357605ab17a-operator-scripts\") pod \"keystone-b8e2-account-create-update-zz4f6\" (UID: \"c24cb3a0-a66a-4419-8427-a357605ab17a\") " pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.934904 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2916a6-1c00-4636-b1a2-ac651646098f-operator-scripts\") pod \"keystone-db-create-r26g4\" (UID: \"0d2916a6-1c00-4636-b1a2-ac651646098f\") " pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.935197 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk6jj\" (UniqueName: \"kubernetes.io/projected/0d2916a6-1c00-4636-b1a2-ac651646098f-kube-api-access-zk6jj\") pod \"keystone-db-create-r26g4\" (UID: \"0d2916a6-1c00-4636-b1a2-ac651646098f\") " pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.935387 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cshps\" (UniqueName: \"kubernetes.io/projected/c24cb3a0-a66a-4419-8427-a357605ab17a-kube-api-access-cshps\") pod \"keystone-b8e2-account-create-update-zz4f6\" (UID: \"c24cb3a0-a66a-4419-8427-a357605ab17a\") " pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.937418 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2916a6-1c00-4636-b1a2-ac651646098f-operator-scripts\") pod \"keystone-db-create-r26g4\" (UID: \"0d2916a6-1c00-4636-b1a2-ac651646098f\") " pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:34 crc kubenswrapper[4959]: I0128 15:36:34.959893 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk6jj\" (UniqueName: \"kubernetes.io/projected/0d2916a6-1c00-4636-b1a2-ac651646098f-kube-api-access-zk6jj\") pod \"keystone-db-create-r26g4\" (UID: \"0d2916a6-1c00-4636-b1a2-ac651646098f\") " pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.037446 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c24cb3a0-a66a-4419-8427-a357605ab17a-operator-scripts\") pod \"keystone-b8e2-account-create-update-zz4f6\" (UID: \"c24cb3a0-a66a-4419-8427-a357605ab17a\") " pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.037563 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cshps\" (UniqueName: \"kubernetes.io/projected/c24cb3a0-a66a-4419-8427-a357605ab17a-kube-api-access-cshps\") pod 
\"keystone-b8e2-account-create-update-zz4f6\" (UID: \"c24cb3a0-a66a-4419-8427-a357605ab17a\") " pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.038410 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c24cb3a0-a66a-4419-8427-a357605ab17a-operator-scripts\") pod \"keystone-b8e2-account-create-update-zz4f6\" (UID: \"c24cb3a0-a66a-4419-8427-a357605ab17a\") " pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.061037 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.063270 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cshps\" (UniqueName: \"kubernetes.io/projected/c24cb3a0-a66a-4419-8427-a357605ab17a-kube-api-access-cshps\") pod \"keystone-b8e2-account-create-update-zz4f6\" (UID: \"c24cb3a0-a66a-4419-8427-a357605ab17a\") " pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.131284 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-4wc26"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.133220 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-4wc26" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.148714 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-4wc26"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.163889 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.172467 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-04e3-account-create-update-tqpxc"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.181184 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.190829 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.225174 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-04e3-account-create-update-tqpxc"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.241603 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxxd2\" (UniqueName: \"kubernetes.io/projected/1f42737f-e000-4c42-8862-9b55c145364c-kube-api-access-vxxd2\") pod \"placement-db-create-4wc26\" (UID: \"1f42737f-e000-4c42-8862-9b55c145364c\") " pod="openstack/placement-db-create-4wc26" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.241961 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f42737f-e000-4c42-8862-9b55c145364c-operator-scripts\") pod \"placement-db-create-4wc26\" (UID: \"1f42737f-e000-4c42-8862-9b55c145364c\") " pod="openstack/placement-db-create-4wc26" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.242290 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-874tt\" (UniqueName: \"kubernetes.io/projected/f055c78c-148d-47e9-847c-99dfe92b152a-kube-api-access-874tt\") pod \"placement-04e3-account-create-update-tqpxc\" (UID: \"f055c78c-148d-47e9-847c-99dfe92b152a\") " pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.242395 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f055c78c-148d-47e9-847c-99dfe92b152a-operator-scripts\") pod \"placement-04e3-account-create-update-tqpxc\" (UID: \"f055c78c-148d-47e9-847c-99dfe92b152a\") " pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.344365 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f42737f-e000-4c42-8862-9b55c145364c-operator-scripts\") pod \"placement-db-create-4wc26\" (UID: \"1f42737f-e000-4c42-8862-9b55c145364c\") " pod="openstack/placement-db-create-4wc26" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.344490 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-874tt\" (UniqueName: \"kubernetes.io/projected/f055c78c-148d-47e9-847c-99dfe92b152a-kube-api-access-874tt\") pod \"placement-04e3-account-create-update-tqpxc\" (UID: \"f055c78c-148d-47e9-847c-99dfe92b152a\") " pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.344549 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f055c78c-148d-47e9-847c-99dfe92b152a-operator-scripts\") pod \"placement-04e3-account-create-update-tqpxc\" (UID: \"f055c78c-148d-47e9-847c-99dfe92b152a\") " pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.344650 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxxd2\" 
(UniqueName: \"kubernetes.io/projected/1f42737f-e000-4c42-8862-9b55c145364c-kube-api-access-vxxd2\") pod \"placement-db-create-4wc26\" (UID: \"1f42737f-e000-4c42-8862-9b55c145364c\") " pod="openstack/placement-db-create-4wc26" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.345492 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f055c78c-148d-47e9-847c-99dfe92b152a-operator-scripts\") pod \"placement-04e3-account-create-update-tqpxc\" (UID: \"f055c78c-148d-47e9-847c-99dfe92b152a\") " pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.345538 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f42737f-e000-4c42-8862-9b55c145364c-operator-scripts\") pod \"placement-db-create-4wc26\" (UID: \"1f42737f-e000-4c42-8862-9b55c145364c\") " pod="openstack/placement-db-create-4wc26" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.363903 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-874tt\" (UniqueName: \"kubernetes.io/projected/f055c78c-148d-47e9-847c-99dfe92b152a-kube-api-access-874tt\") pod \"placement-04e3-account-create-update-tqpxc\" (UID: \"f055c78c-148d-47e9-847c-99dfe92b152a\") " pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.375369 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxxd2\" (UniqueName: \"kubernetes.io/projected/1f42737f-e000-4c42-8862-9b55c145364c-kube-api-access-vxxd2\") pod \"placement-db-create-4wc26\" (UID: \"1f42737f-e000-4c42-8862-9b55c145364c\") " pod="openstack/placement-db-create-4wc26" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.462921 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-4wc26" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.503171 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.668797 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-5cndm"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.670381 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5cndm" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.684326 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5cndm"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.759996 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcnw7\" (UniqueName: \"kubernetes.io/projected/b5766c03-911f-42fc-9e74-83e62a42d6e4-kube-api-access-wcnw7\") pod \"glance-db-create-5cndm\" (UID: \"b5766c03-911f-42fc-9e74-83e62a42d6e4\") " pod="openstack/glance-db-create-5cndm" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.760200 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5766c03-911f-42fc-9e74-83e62a42d6e4-operator-scripts\") pod \"glance-db-create-5cndm\" (UID: \"b5766c03-911f-42fc-9e74-83e62a42d6e4\") " pod="openstack/glance-db-create-5cndm" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.801678 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-5b25-account-create-update-csj55"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.807462 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.811191 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.814128 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5b25-account-create-update-csj55"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.864634 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcnw7\" (UniqueName: \"kubernetes.io/projected/b5766c03-911f-42fc-9e74-83e62a42d6e4-kube-api-access-wcnw7\") pod \"glance-db-create-5cndm\" (UID: \"b5766c03-911f-42fc-9e74-83e62a42d6e4\") " pod="openstack/glance-db-create-5cndm" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.864696 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p78pj\" (UniqueName: \"kubernetes.io/projected/ff101780-4112-48ff-a0f8-d5acff017705-kube-api-access-p78pj\") pod \"glance-5b25-account-create-update-csj55\" (UID: \"ff101780-4112-48ff-a0f8-d5acff017705\") " pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.864769 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff101780-4112-48ff-a0f8-d5acff017705-operator-scripts\") pod \"glance-5b25-account-create-update-csj55\" (UID: \"ff101780-4112-48ff-a0f8-d5acff017705\") " pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.864796 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5766c03-911f-42fc-9e74-83e62a42d6e4-operator-scripts\") pod \"glance-db-create-5cndm\" (UID: \"b5766c03-911f-42fc-9e74-83e62a42d6e4\") " pod="openstack/glance-db-create-5cndm" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.865726 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/b5766c03-911f-42fc-9e74-83e62a42d6e4-operator-scripts\") pod \"glance-db-create-5cndm\" (UID: \"b5766c03-911f-42fc-9e74-83e62a42d6e4\") " pod="openstack/glance-db-create-5cndm" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.894578 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcnw7\" (UniqueName: \"kubernetes.io/projected/b5766c03-911f-42fc-9e74-83e62a42d6e4-kube-api-access-wcnw7\") pod \"glance-db-create-5cndm\" (UID: \"b5766c03-911f-42fc-9e74-83e62a42d6e4\") " pod="openstack/glance-db-create-5cndm" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.939576 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b8e2-account-create-update-zz4f6"] Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.966531 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff101780-4112-48ff-a0f8-d5acff017705-operator-scripts\") pod \"glance-5b25-account-create-update-csj55\" (UID: \"ff101780-4112-48ff-a0f8-d5acff017705\") " pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.966774 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p78pj\" (UniqueName: \"kubernetes.io/projected/ff101780-4112-48ff-a0f8-d5acff017705-kube-api-access-p78pj\") pod \"glance-5b25-account-create-update-csj55\" (UID: \"ff101780-4112-48ff-a0f8-d5acff017705\") " pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.968692 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff101780-4112-48ff-a0f8-d5acff017705-operator-scripts\") pod \"glance-5b25-account-create-update-csj55\" (UID: \"ff101780-4112-48ff-a0f8-d5acff017705\") " pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:35 crc kubenswrapper[4959]: I0128 15:36:35.988453 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p78pj\" (UniqueName: \"kubernetes.io/projected/ff101780-4112-48ff-a0f8-d5acff017705-kube-api-access-p78pj\") pod \"glance-5b25-account-create-update-csj55\" (UID: \"ff101780-4112-48ff-a0f8-d5acff017705\") " pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.009677 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5cndm" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.054844 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-r26g4"] Jan 28 15:36:36 crc kubenswrapper[4959]: W0128 15:36:36.056396 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc24cb3a0_a66a_4419_8427_a357605ab17a.slice/crio-f1e66f9a2ad080b7b12bc40060793379a80746cf5829e9ab34ea7cdc7dd63ff6 WatchSource:0}: Error finding container f1e66f9a2ad080b7b12bc40060793379a80746cf5829e9ab34ea7cdc7dd63ff6: Status 404 returned error can't find the container with id f1e66f9a2ad080b7b12bc40060793379a80746cf5829e9ab34ea7cdc7dd63ff6 Jan 28 15:36:36 crc kubenswrapper[4959]: W0128 15:36:36.058183 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d2916a6_1c00_4636_b1a2_ac651646098f.slice/crio-f386c88361473d065e95dff6699c1934b84da26b8395820ff594b43c670edb6f WatchSource:0}: Error finding container f386c88361473d065e95dff6699c1934b84da26b8395820ff594b43c670edb6f: Status 404 returned error can't find the container with id f386c88361473d065e95dff6699c1934b84da26b8395820ff594b43c670edb6f Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.129381 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.131428 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.272334 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdtg9\" (UniqueName: \"kubernetes.io/projected/f07842d9-130c-4db0-9e4f-92ba86a89482-kube-api-access-fdtg9\") pod \"f07842d9-130c-4db0-9e4f-92ba86a89482\" (UID: \"f07842d9-130c-4db0-9e4f-92ba86a89482\") " Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.272651 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07842d9-130c-4db0-9e4f-92ba86a89482-operator-scripts\") pod \"f07842d9-130c-4db0-9e4f-92ba86a89482\" (UID: \"f07842d9-130c-4db0-9e4f-92ba86a89482\") " Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.273428 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f07842d9-130c-4db0-9e4f-92ba86a89482-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f07842d9-130c-4db0-9e4f-92ba86a89482" (UID: "f07842d9-130c-4db0-9e4f-92ba86a89482"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.277465 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f07842d9-130c-4db0-9e4f-92ba86a89482-kube-api-access-fdtg9" (OuterVolumeSpecName: "kube-api-access-fdtg9") pod "f07842d9-130c-4db0-9e4f-92ba86a89482" (UID: "f07842d9-130c-4db0-9e4f-92ba86a89482"). InnerVolumeSpecName "kube-api-access-fdtg9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.373892 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kvkkf" event={"ID":"f07842d9-130c-4db0-9e4f-92ba86a89482","Type":"ContainerDied","Data":"0d88426042c67b370b6f9ce9ba46726862f0a69cbd559928fe819dda8ddae06e"} Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.373950 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d88426042c67b370b6f9ce9ba46726862f0a69cbd559928fe819dda8ddae06e" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.374055 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kvkkf" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.375551 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f07842d9-130c-4db0-9e4f-92ba86a89482-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.375576 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdtg9\" (UniqueName: \"kubernetes.io/projected/f07842d9-130c-4db0-9e4f-92ba86a89482-kube-api-access-fdtg9\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.376391 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b8e2-account-create-update-zz4f6" event={"ID":"c24cb3a0-a66a-4419-8427-a357605ab17a","Type":"ContainerStarted","Data":"f1e66f9a2ad080b7b12bc40060793379a80746cf5829e9ab34ea7cdc7dd63ff6"} Jan 28 15:36:36 crc kubenswrapper[4959]: I0128 15:36:36.378020 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r26g4" event={"ID":"0d2916a6-1c00-4636-b1a2-ac651646098f","Type":"ContainerStarted","Data":"f386c88361473d065e95dff6699c1934b84da26b8395820ff594b43c670edb6f"} Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.048269 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-04e3-account-create-update-tqpxc"] Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.134281 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5cndm"] Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.141520 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-4wc26"] Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.267343 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5b25-account-create-update-csj55"] Jan 28 15:36:37 crc kubenswrapper[4959]: W0128 15:36:37.284174 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff101780_4112_48ff_a0f8_d5acff017705.slice/crio-19bd025a25ec5b31f8747032a9b067533b1bcd00ae7ac389fb5183fb6875d070 WatchSource:0}: Error finding container 19bd025a25ec5b31f8747032a9b067533b1bcd00ae7ac389fb5183fb6875d070: Status 404 returned error can't find the container with id 19bd025a25ec5b31f8747032a9b067533b1bcd00ae7ac389fb5183fb6875d070 Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.390950 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-04e3-account-create-update-tqpxc" event={"ID":"f055c78c-148d-47e9-847c-99dfe92b152a","Type":"ContainerStarted","Data":"73917b41672cab4d6b5a86db56c94d635402dbb1701be73b009761f029de3cc4"}
Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.392631 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-4wc26" event={"ID":"1f42737f-e000-4c42-8862-9b55c145364c","Type":"ContainerStarted","Data":"310253c81a3b90167e90a0bffe5971a898fd11d51da36028e56f4d64925f6b5d"} Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.396319 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"806ffa0f01bb3b839a75905db1deae9da409d00d3dd7d6d1d4524890c3dadcc6"} Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.402423 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b8e2-account-create-update-zz4f6" event={"ID":"c24cb3a0-a66a-4419-8427-a357605ab17a","Type":"ContainerStarted","Data":"edd7b1c2b8d6fa330502eb5ff2f585726244a751368dad20c1eb2cd4077ae069"} Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.403966 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5cndm" event={"ID":"b5766c03-911f-42fc-9e74-83e62a42d6e4","Type":"ContainerStarted","Data":"29124194f7cd024954402cbf7c3988ca48e2ad3dd3349c0a19dad7fad4337f36"} Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.405729 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5b25-account-create-update-csj55" event={"ID":"ff101780-4112-48ff-a0f8-d5acff017705","Type":"ContainerStarted","Data":"19bd025a25ec5b31f8747032a9b067533b1bcd00ae7ac389fb5183fb6875d070"} Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.412007 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r26g4" event={"ID":"0d2916a6-1c00-4636-b1a2-ac651646098f","Type":"ContainerStarted","Data":"2c0d09e171cd56d136a483d745796cfc0e46d5482a4de6742b0aafe8d28dcd8c"} Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.439868 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-r26g4" podStartSLOduration=3.439845718 podStartE2EDuration="3.439845718s" podCreationTimestamp="2026-01-28 15:36:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:37.43302061 +0000 UTC m=+1180.878927013" watchObservedRunningTime="2026-01-28 15:36:37.439845718 +0000 UTC m=+1180.885752101" Jan 28 15:36:37 crc kubenswrapper[4959]: I0128 15:36:37.976568 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.340081 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kvkkf"] Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.347784 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-kvkkf"] Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.424240 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-4wc26" event={"ID":"1f42737f-e000-4c42-8862-9b55c145364c","Type":"ContainerStarted","Data":"5bea39b1e9b6c3ef48152fd709f1d1108ebd5e0ddb6b1fd73e40728607716048"}
Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.430258 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"85d8f5ca7f89fe8b77d1556075509dbe3677fdadb860e5dd70665db490f8c58d"} Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.432332 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5cndm" event={"ID":"b5766c03-911f-42fc-9e74-83e62a42d6e4","Type":"ContainerStarted","Data":"325bec10a9e1cc2dfaf85077ee96bbe15416280e684927be4f208d49cbe7df1e"} Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.436617 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5b25-account-create-update-csj55" event={"ID":"ff101780-4112-48ff-a0f8-d5acff017705","Type":"ContainerStarted","Data":"4c6a61d0140e6b48d64918bbac4d8a18e32a9c5f8e73799836bec3a5051ea866"} Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.439609 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-04e3-account-create-update-tqpxc" event={"ID":"f055c78c-148d-47e9-847c-99dfe92b152a","Type":"ContainerStarted","Data":"8098bdf7afbc11fda62036cd2b83c7c5cad0334551293fc4331e7c92b9e4f24e"} Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.443253 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-4wc26" podStartSLOduration=3.4432300270000002 podStartE2EDuration="3.443230027s" podCreationTimestamp="2026-01-28 15:36:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:38.441846223 +0000 UTC m=+1181.887752616" watchObservedRunningTime="2026-01-28 15:36:38.443230027 +0000 UTC m=+1181.889136410" Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.469883 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-5b25-account-create-update-csj55" podStartSLOduration=3.469846642 podStartE2EDuration="3.469846642s" podCreationTimestamp="2026-01-28 15:36:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:38.459249701 +0000 UTC m=+1181.905156094" watchObservedRunningTime="2026-01-28 15:36:38.469846642 +0000 UTC m=+1181.915753025" Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.489199 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-5cndm" podStartSLOduration=3.489176507 podStartE2EDuration="3.489176507s" podCreationTimestamp="2026-01-28 15:36:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:38.477069379 +0000 UTC m=+1181.922975782" watchObservedRunningTime="2026-01-28 15:36:38.489176507 +0000 UTC m=+1181.935082890" Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.508701 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-04e3-account-create-update-tqpxc" podStartSLOduration=3.508670676 podStartE2EDuration="3.508670676s" podCreationTimestamp="2026-01-28 15:36:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:38.503695765 +0000 UTC m=+1181.949602158" watchObservedRunningTime="2026-01-28 15:36:38.508670676 +0000 UTC m=+1181.954577079" Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.549454 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/keystone-b8e2-account-create-update-zz4f6" podStartSLOduration=4.549426519 podStartE2EDuration="4.549426519s" podCreationTimestamp="2026-01-28 15:36:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:38.532490913 +0000 UTC m=+1181.978397296" watchObservedRunningTime="2026-01-28 15:36:38.549426519 +0000 UTC m=+1181.995332902" Jan 28 15:36:38 crc kubenswrapper[4959]: I0128 15:36:38.598207 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f07842d9-130c-4db0-9e4f-92ba86a89482" path="/var/lib/kubelet/pods/f07842d9-130c-4db0-9e4f-92ba86a89482/volumes" Jan 28 15:36:40 crc kubenswrapper[4959]: I0128 15:36:40.461017 4959 generic.go:334] "Generic (PLEG): container finished" podID="0d2916a6-1c00-4636-b1a2-ac651646098f" containerID="2c0d09e171cd56d136a483d745796cfc0e46d5482a4de6742b0aafe8d28dcd8c" exitCode=0 Jan 28 15:36:40 crc kubenswrapper[4959]: I0128 15:36:40.461149 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r26g4" event={"ID":"0d2916a6-1c00-4636-b1a2-ac651646098f","Type":"ContainerDied","Data":"2c0d09e171cd56d136a483d745796cfc0e46d5482a4de6742b0aafe8d28dcd8c"} Jan 28 15:36:41 crc kubenswrapper[4959]: I0128 15:36:41.849383 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:41 crc kubenswrapper[4959]: I0128 15:36:41.878211 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2916a6-1c00-4636-b1a2-ac651646098f-operator-scripts\") pod \"0d2916a6-1c00-4636-b1a2-ac651646098f\" (UID: \"0d2916a6-1c00-4636-b1a2-ac651646098f\") " Jan 28 15:36:41 crc kubenswrapper[4959]: I0128 15:36:41.878432 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zk6jj\" (UniqueName: \"kubernetes.io/projected/0d2916a6-1c00-4636-b1a2-ac651646098f-kube-api-access-zk6jj\") pod \"0d2916a6-1c00-4636-b1a2-ac651646098f\" (UID: \"0d2916a6-1c00-4636-b1a2-ac651646098f\") " Jan 28 15:36:41 crc kubenswrapper[4959]: I0128 15:36:41.879853 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d2916a6-1c00-4636-b1a2-ac651646098f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0d2916a6-1c00-4636-b1a2-ac651646098f" (UID: "0d2916a6-1c00-4636-b1a2-ac651646098f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:41 crc kubenswrapper[4959]: I0128 15:36:41.888156 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d2916a6-1c00-4636-b1a2-ac651646098f-kube-api-access-zk6jj" (OuterVolumeSpecName: "kube-api-access-zk6jj") pod "0d2916a6-1c00-4636-b1a2-ac651646098f" (UID: "0d2916a6-1c00-4636-b1a2-ac651646098f"). InnerVolumeSpecName "kube-api-access-zk6jj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:41 crc kubenswrapper[4959]: I0128 15:36:41.980640 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2916a6-1c00-4636-b1a2-ac651646098f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:41 crc kubenswrapper[4959]: I0128 15:36:41.980688 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zk6jj\" (UniqueName: \"kubernetes.io/projected/0d2916a6-1c00-4636-b1a2-ac651646098f-kube-api-access-zk6jj\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:42 crc kubenswrapper[4959]: I0128 15:36:42.483007 4959 generic.go:334] "Generic (PLEG): container finished" podID="b5766c03-911f-42fc-9e74-83e62a42d6e4" containerID="325bec10a9e1cc2dfaf85077ee96bbe15416280e684927be4f208d49cbe7df1e" exitCode=0 Jan 28 15:36:42 crc kubenswrapper[4959]: I0128 15:36:42.483148 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5cndm" event={"ID":"b5766c03-911f-42fc-9e74-83e62a42d6e4","Type":"ContainerDied","Data":"325bec10a9e1cc2dfaf85077ee96bbe15416280e684927be4f208d49cbe7df1e"} Jan 28 15:36:42 crc kubenswrapper[4959]: I0128 15:36:42.486609 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r26g4" event={"ID":"0d2916a6-1c00-4636-b1a2-ac651646098f","Type":"ContainerDied","Data":"f386c88361473d065e95dff6699c1934b84da26b8395820ff594b43c670edb6f"} Jan 28 15:36:42 crc kubenswrapper[4959]: I0128 15:36:42.486676 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f386c88361473d065e95dff6699c1934b84da26b8395820ff594b43c670edb6f" Jan 28 15:36:42 crc kubenswrapper[4959]: I0128 15:36:42.486679 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-r26g4" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.334872 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-2rc6d"] Jan 28 15:36:43 crc kubenswrapper[4959]: E0128 15:36:43.335844 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2916a6-1c00-4636-b1a2-ac651646098f" containerName="mariadb-database-create" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.335864 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2916a6-1c00-4636-b1a2-ac651646098f" containerName="mariadb-database-create" Jan 28 15:36:43 crc kubenswrapper[4959]: E0128 15:36:43.335883 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f07842d9-130c-4db0-9e4f-92ba86a89482" containerName="mariadb-account-create-update" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.335892 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f07842d9-130c-4db0-9e4f-92ba86a89482" containerName="mariadb-account-create-update" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.336064 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f07842d9-130c-4db0-9e4f-92ba86a89482" containerName="mariadb-account-create-update" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.336084 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d2916a6-1c00-4636-b1a2-ac651646098f" containerName="mariadb-database-create" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.336819 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.339498 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.350597 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-2rc6d"] Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.407497 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ct6d\" (UniqueName: \"kubernetes.io/projected/99812e98-c4ee-439d-aea7-71a1ac4f02fa-kube-api-access-2ct6d\") pod \"root-account-create-update-2rc6d\" (UID: \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\") " pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.407677 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99812e98-c4ee-439d-aea7-71a1ac4f02fa-operator-scripts\") pod \"root-account-create-update-2rc6d\" (UID: \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\") " pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.509650 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99812e98-c4ee-439d-aea7-71a1ac4f02fa-operator-scripts\") pod \"root-account-create-update-2rc6d\" (UID: \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\") " pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.509956 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ct6d\" (UniqueName: \"kubernetes.io/projected/99812e98-c4ee-439d-aea7-71a1ac4f02fa-kube-api-access-2ct6d\") pod \"root-account-create-update-2rc6d\" (UID: \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\") " pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.510630 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99812e98-c4ee-439d-aea7-71a1ac4f02fa-operator-scripts\") pod \"root-account-create-update-2rc6d\" (UID: \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\") " pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.534881 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ct6d\" (UniqueName: \"kubernetes.io/projected/99812e98-c4ee-439d-aea7-71a1ac4f02fa-kube-api-access-2ct6d\") pod \"root-account-create-update-2rc6d\" (UID: \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\") " pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.660724 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:43 crc kubenswrapper[4959]: I0128 15:36:43.892525 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5cndm" Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.018460 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcnw7\" (UniqueName: \"kubernetes.io/projected/b5766c03-911f-42fc-9e74-83e62a42d6e4-kube-api-access-wcnw7\") pod \"b5766c03-911f-42fc-9e74-83e62a42d6e4\" (UID: \"b5766c03-911f-42fc-9e74-83e62a42d6e4\") " Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.018636 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5766c03-911f-42fc-9e74-83e62a42d6e4-operator-scripts\") pod \"b5766c03-911f-42fc-9e74-83e62a42d6e4\" (UID: \"b5766c03-911f-42fc-9e74-83e62a42d6e4\") " Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.019967 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5766c03-911f-42fc-9e74-83e62a42d6e4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b5766c03-911f-42fc-9e74-83e62a42d6e4" (UID: "b5766c03-911f-42fc-9e74-83e62a42d6e4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.026283 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5766c03-911f-42fc-9e74-83e62a42d6e4-kube-api-access-wcnw7" (OuterVolumeSpecName: "kube-api-access-wcnw7") pod "b5766c03-911f-42fc-9e74-83e62a42d6e4" (UID: "b5766c03-911f-42fc-9e74-83e62a42d6e4"). InnerVolumeSpecName "kube-api-access-wcnw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.121511 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wcnw7\" (UniqueName: \"kubernetes.io/projected/b5766c03-911f-42fc-9e74-83e62a42d6e4-kube-api-access-wcnw7\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.121561 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5766c03-911f-42fc-9e74-83e62a42d6e4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.268573 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-2rc6d"] Jan 28 15:36:44 crc kubenswrapper[4959]: W0128 15:36:44.275027 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99812e98_c4ee_439d_aea7_71a1ac4f02fa.slice/crio-8f68d5119b9cc00812f01b28ebcf60defe3900415cf6a31bffe9c07904b72544 WatchSource:0}: Error finding container 8f68d5119b9cc00812f01b28ebcf60defe3900415cf6a31bffe9c07904b72544: Status 404 returned error can't find the container with id 8f68d5119b9cc00812f01b28ebcf60defe3900415cf6a31bffe9c07904b72544 Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.505779 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"6bd11d5c4336ef5762e16a272c8c0b20d28a57335c7724eafc2dd4a77af9aebb"} Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.507559 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-2rc6d" 
event={"ID":"99812e98-c4ee-439d-aea7-71a1ac4f02fa","Type":"ContainerStarted","Data":"8f68d5119b9cc00812f01b28ebcf60defe3900415cf6a31bffe9c07904b72544"} Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.509820 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5cndm" Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.509820 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5cndm" event={"ID":"b5766c03-911f-42fc-9e74-83e62a42d6e4","Type":"ContainerDied","Data":"29124194f7cd024954402cbf7c3988ca48e2ad3dd3349c0a19dad7fad4337f36"} Jan 28 15:36:44 crc kubenswrapper[4959]: I0128 15:36:44.509867 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29124194f7cd024954402cbf7c3988ca48e2ad3dd3349c0a19dad7fad4337f36" Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.170892 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.520047 4959 generic.go:334] "Generic (PLEG): container finished" podID="ff101780-4112-48ff-a0f8-d5acff017705" containerID="4c6a61d0140e6b48d64918bbac4d8a18e32a9c5f8e73799836bec3a5051ea866" exitCode=0 Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.520165 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5b25-account-create-update-csj55" event={"ID":"ff101780-4112-48ff-a0f8-d5acff017705","Type":"ContainerDied","Data":"4c6a61d0140e6b48d64918bbac4d8a18e32a9c5f8e73799836bec3a5051ea866"} Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.522341 4959 generic.go:334] "Generic (PLEG): container finished" podID="f055c78c-148d-47e9-847c-99dfe92b152a" containerID="8098bdf7afbc11fda62036cd2b83c7c5cad0334551293fc4331e7c92b9e4f24e" exitCode=0 Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.522384 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-04e3-account-create-update-tqpxc" event={"ID":"f055c78c-148d-47e9-847c-99dfe92b152a","Type":"ContainerDied","Data":"8098bdf7afbc11fda62036cd2b83c7c5cad0334551293fc4331e7c92b9e4f24e"} Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.524206 4959 generic.go:334] "Generic (PLEG): container finished" podID="1f42737f-e000-4c42-8862-9b55c145364c" containerID="5bea39b1e9b6c3ef48152fd709f1d1108ebd5e0ddb6b1fd73e40728607716048" exitCode=0 Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.524247 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-4wc26" event={"ID":"1f42737f-e000-4c42-8862-9b55c145364c","Type":"ContainerDied","Data":"5bea39b1e9b6c3ef48152fd709f1d1108ebd5e0ddb6b1fd73e40728607716048"} Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.527026 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"f1b3ef63241babe87a94563a656db8e286a337fdcadce868f9c09325c82baae2"} Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.528662 4959 generic.go:334] "Generic (PLEG): container finished" podID="99812e98-c4ee-439d-aea7-71a1ac4f02fa" containerID="add42293b08ef2fd4803e1b1caea8c3a26154d7ad13f0eb5a209c9ca77cb3082" exitCode=0 Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.528773 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-2rc6d" 
event={"ID":"99812e98-c4ee-439d-aea7-71a1ac4f02fa","Type":"ContainerDied","Data":"add42293b08ef2fd4803e1b1caea8c3a26154d7ad13f0eb5a209c9ca77cb3082"} Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.530213 4959 generic.go:334] "Generic (PLEG): container finished" podID="c24cb3a0-a66a-4419-8427-a357605ab17a" containerID="edd7b1c2b8d6fa330502eb5ff2f585726244a751368dad20c1eb2cd4077ae069" exitCode=0 Jan 28 15:36:45 crc kubenswrapper[4959]: I0128 15:36:45.530252 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b8e2-account-create-update-zz4f6" event={"ID":"c24cb3a0-a66a-4419-8427-a357605ab17a","Type":"ContainerDied","Data":"edd7b1c2b8d6fa330502eb5ff2f585726244a751368dad20c1eb2cd4077ae069"} Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.013224 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.183720 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p78pj\" (UniqueName: \"kubernetes.io/projected/ff101780-4112-48ff-a0f8-d5acff017705-kube-api-access-p78pj\") pod \"ff101780-4112-48ff-a0f8-d5acff017705\" (UID: \"ff101780-4112-48ff-a0f8-d5acff017705\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.183876 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff101780-4112-48ff-a0f8-d5acff017705-operator-scripts\") pod \"ff101780-4112-48ff-a0f8-d5acff017705\" (UID: \"ff101780-4112-48ff-a0f8-d5acff017705\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.184858 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff101780-4112-48ff-a0f8-d5acff017705-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ff101780-4112-48ff-a0f8-d5acff017705" (UID: "ff101780-4112-48ff-a0f8-d5acff017705"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.192981 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff101780-4112-48ff-a0f8-d5acff017705-kube-api-access-p78pj" (OuterVolumeSpecName: "kube-api-access-p78pj") pod "ff101780-4112-48ff-a0f8-d5acff017705" (UID: "ff101780-4112-48ff-a0f8-d5acff017705"). InnerVolumeSpecName "kube-api-access-p78pj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.286101 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ff101780-4112-48ff-a0f8-d5acff017705-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.286183 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p78pj\" (UniqueName: \"kubernetes.io/projected/ff101780-4112-48ff-a0f8-d5acff017705-kube-api-access-p78pj\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.295324 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.302040 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-4wc26" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.318527 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.335519 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.387230 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxxd2\" (UniqueName: \"kubernetes.io/projected/1f42737f-e000-4c42-8862-9b55c145364c-kube-api-access-vxxd2\") pod \"1f42737f-e000-4c42-8862-9b55c145364c\" (UID: \"1f42737f-e000-4c42-8862-9b55c145364c\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.387310 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-874tt\" (UniqueName: \"kubernetes.io/projected/f055c78c-148d-47e9-847c-99dfe92b152a-kube-api-access-874tt\") pod \"f055c78c-148d-47e9-847c-99dfe92b152a\" (UID: \"f055c78c-148d-47e9-847c-99dfe92b152a\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.387536 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f055c78c-148d-47e9-847c-99dfe92b152a-operator-scripts\") pod \"f055c78c-148d-47e9-847c-99dfe92b152a\" (UID: \"f055c78c-148d-47e9-847c-99dfe92b152a\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.387584 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f42737f-e000-4c42-8862-9b55c145364c-operator-scripts\") pod \"1f42737f-e000-4c42-8862-9b55c145364c\" (UID: \"1f42737f-e000-4c42-8862-9b55c145364c\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.387632 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cshps\" (UniqueName: \"kubernetes.io/projected/c24cb3a0-a66a-4419-8427-a357605ab17a-kube-api-access-cshps\") pod \"c24cb3a0-a66a-4419-8427-a357605ab17a\" (UID: \"c24cb3a0-a66a-4419-8427-a357605ab17a\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.388375 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c24cb3a0-a66a-4419-8427-a357605ab17a-operator-scripts\") pod \"c24cb3a0-a66a-4419-8427-a357605ab17a\" (UID: \"c24cb3a0-a66a-4419-8427-a357605ab17a\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.389577 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f42737f-e000-4c42-8862-9b55c145364c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1f42737f-e000-4c42-8862-9b55c145364c" (UID: "1f42737f-e000-4c42-8862-9b55c145364c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.389972 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f055c78c-148d-47e9-847c-99dfe92b152a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f055c78c-148d-47e9-847c-99dfe92b152a" (UID: "f055c78c-148d-47e9-847c-99dfe92b152a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.390023 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24cb3a0-a66a-4419-8427-a357605ab17a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c24cb3a0-a66a-4419-8427-a357605ab17a" (UID: "c24cb3a0-a66a-4419-8427-a357605ab17a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.393025 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f055c78c-148d-47e9-847c-99dfe92b152a-kube-api-access-874tt" (OuterVolumeSpecName: "kube-api-access-874tt") pod "f055c78c-148d-47e9-847c-99dfe92b152a" (UID: "f055c78c-148d-47e9-847c-99dfe92b152a"). InnerVolumeSpecName "kube-api-access-874tt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.393210 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c24cb3a0-a66a-4419-8427-a357605ab17a-kube-api-access-cshps" (OuterVolumeSpecName: "kube-api-access-cshps") pod "c24cb3a0-a66a-4419-8427-a357605ab17a" (UID: "c24cb3a0-a66a-4419-8427-a357605ab17a"). InnerVolumeSpecName "kube-api-access-cshps". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.394701 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f42737f-e000-4c42-8862-9b55c145364c-kube-api-access-vxxd2" (OuterVolumeSpecName: "kube-api-access-vxxd2") pod "1f42737f-e000-4c42-8862-9b55c145364c" (UID: "1f42737f-e000-4c42-8862-9b55c145364c"). InnerVolumeSpecName "kube-api-access-vxxd2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.490469 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99812e98-c4ee-439d-aea7-71a1ac4f02fa-operator-scripts\") pod \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\" (UID: \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.491049 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ct6d\" (UniqueName: \"kubernetes.io/projected/99812e98-c4ee-439d-aea7-71a1ac4f02fa-kube-api-access-2ct6d\") pod \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\" (UID: \"99812e98-c4ee-439d-aea7-71a1ac4f02fa\") " Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.491889 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f42737f-e000-4c42-8862-9b55c145364c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.491916 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cshps\" (UniqueName: \"kubernetes.io/projected/c24cb3a0-a66a-4419-8427-a357605ab17a-kube-api-access-cshps\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.491928 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c24cb3a0-a66a-4419-8427-a357605ab17a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.491938 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxxd2\" (UniqueName: \"kubernetes.io/projected/1f42737f-e000-4c42-8862-9b55c145364c-kube-api-access-vxxd2\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.491949 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-874tt\" (UniqueName: \"kubernetes.io/projected/f055c78c-148d-47e9-847c-99dfe92b152a-kube-api-access-874tt\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.491958 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f055c78c-148d-47e9-847c-99dfe92b152a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.491985 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99812e98-c4ee-439d-aea7-71a1ac4f02fa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "99812e98-c4ee-439d-aea7-71a1ac4f02fa" (UID: "99812e98-c4ee-439d-aea7-71a1ac4f02fa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.495598 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99812e98-c4ee-439d-aea7-71a1ac4f02fa-kube-api-access-2ct6d" (OuterVolumeSpecName: "kube-api-access-2ct6d") pod "99812e98-c4ee-439d-aea7-71a1ac4f02fa" (UID: "99812e98-c4ee-439d-aea7-71a1ac4f02fa"). InnerVolumeSpecName "kube-api-access-2ct6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.550649 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-2rc6d" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.550686 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-2rc6d" event={"ID":"99812e98-c4ee-439d-aea7-71a1ac4f02fa","Type":"ContainerDied","Data":"8f68d5119b9cc00812f01b28ebcf60defe3900415cf6a31bffe9c07904b72544"} Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.550737 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f68d5119b9cc00812f01b28ebcf60defe3900415cf6a31bffe9c07904b72544" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.553102 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b8e2-account-create-update-zz4f6" event={"ID":"c24cb3a0-a66a-4419-8427-a357605ab17a","Type":"ContainerDied","Data":"f1e66f9a2ad080b7b12bc40060793379a80746cf5829e9ab34ea7cdc7dd63ff6"} Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.553161 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1e66f9a2ad080b7b12bc40060793379a80746cf5829e9ab34ea7cdc7dd63ff6" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.553209 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b8e2-account-create-update-zz4f6" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.555585 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5b25-account-create-update-csj55" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.555588 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5b25-account-create-update-csj55" event={"ID":"ff101780-4112-48ff-a0f8-d5acff017705","Type":"ContainerDied","Data":"19bd025a25ec5b31f8747032a9b067533b1bcd00ae7ac389fb5183fb6875d070"} Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.555771 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19bd025a25ec5b31f8747032a9b067533b1bcd00ae7ac389fb5183fb6875d070" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.571442 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-04e3-account-create-update-tqpxc" event={"ID":"f055c78c-148d-47e9-847c-99dfe92b152a","Type":"ContainerDied","Data":"73917b41672cab4d6b5a86db56c94d635402dbb1701be73b009761f029de3cc4"} Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.571499 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73917b41672cab4d6b5a86db56c94d635402dbb1701be73b009761f029de3cc4" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.571588 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-04e3-account-create-update-tqpxc" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.575023 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-4wc26" event={"ID":"1f42737f-e000-4c42-8862-9b55c145364c","Type":"ContainerDied","Data":"310253c81a3b90167e90a0bffe5971a898fd11d51da36028e56f4d64925f6b5d"} Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.575079 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="310253c81a3b90167e90a0bffe5971a898fd11d51da36028e56f4d64925f6b5d" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.575178 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-4wc26" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.595934 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99812e98-c4ee-439d-aea7-71a1ac4f02fa-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.595984 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ct6d\" (UniqueName: \"kubernetes.io/projected/99812e98-c4ee-439d-aea7-71a1ac4f02fa-kube-api-access-2ct6d\") on node \"crc\" DevicePath \"\"" Jan 28 15:36:47 crc kubenswrapper[4959]: I0128 15:36:47.635678 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-bp544" podUID="93fc210e-4599-4436-b8e6-a20a8c5cd2b4" containerName="ovn-controller" probeResult="failure" output=< Jan 28 15:36:47 crc kubenswrapper[4959]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 28 15:36:47 crc kubenswrapper[4959]: > Jan 28 15:36:48 crc kubenswrapper[4959]: I0128 15:36:48.600356 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"aed9eab61b26a45ecb774472d85bd9eff0b9d6e1664d9abf7b8c440ac355e6c8"} Jan 28 15:36:48 crc kubenswrapper[4959]: I0128 15:36:48.600756 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"b7ec316f7df68eef1678d4d21dbc19764adc773250d135ae5d053a4166cb7472"} Jan 28 15:36:49 crc kubenswrapper[4959]: I0128 15:36:49.630446 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"2b7a26c4931de01191bdbb9fcdbe18dc96bfa429681597839f1709ec33f68517"} Jan 28 15:36:49 crc kubenswrapper[4959]: I0128 15:36:49.631089 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"26022d1a097f4b0b3f7af36760c20071d7fd0dc4752bcc9e3bb11fd953d7282a"} Jan 28 15:36:49 crc kubenswrapper[4959]: I0128 15:36:49.631146 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"33bbd15e0352e5e3dba9772cb10d6c5f24ca6277041fc64801abea7230c9aed1"} Jan 28 15:36:49 crc kubenswrapper[4959]: I0128 15:36:49.631182 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"3b24093eb45d679737e2c2e5e906df8b0546287217c3d3274c8d8627fb352997"} Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.649492 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2c21863c-592f-436a-8fe2-06b0f78b7755","Type":"ContainerStarted","Data":"fad61e7d147c2b136fb431fceac36ff8093d65a9c6bb44d0e6751988005655e6"} Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.652039 4959 generic.go:334] "Generic (PLEG): container finished" podID="a81258f3-e48f-44f0-93d9-02e58302683a" containerID="bdb6462959cea0d57d3c002a05a390ad101555be0a54d21096014e3c5c1c29cc" exitCode=0 Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.652095 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/rabbitmq-server-0" event={"ID":"a81258f3-e48f-44f0-93d9-02e58302683a","Type":"ContainerDied","Data":"bdb6462959cea0d57d3c002a05a390ad101555be0a54d21096014e3c5c1c29cc"} Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.655851 4959 generic.go:334] "Generic (PLEG): container finished" podID="ee1fc53a-3817-4c94-8bd6-569c089c02cb" containerID="95fde7fc0919101006d40aab1d39f00eac950d7d8fdabc9d9734b0f2c8980756" exitCode=0 Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.655889 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ee1fc53a-3817-4c94-8bd6-569c089c02cb","Type":"ContainerDied","Data":"95fde7fc0919101006d40aab1d39f00eac950d7d8fdabc9d9734b0f2c8980756"} Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.731486 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.526410015 podStartE2EDuration="53.731456772s" podCreationTimestamp="2026-01-28 15:35:57 +0000 UTC" firstStartedPulling="2026-01-28 15:36:31.642640179 +0000 UTC m=+1175.088546562" lastFinishedPulling="2026-01-28 15:36:47.847686936 +0000 UTC m=+1191.293593319" observedRunningTime="2026-01-28 15:36:50.700352047 +0000 UTC m=+1194.146258440" watchObservedRunningTime="2026-01-28 15:36:50.731456772 +0000 UTC m=+1194.177363155" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970078 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-l6hzt"] Jan 28 15:36:50 crc kubenswrapper[4959]: E0128 15:36:50.970559 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff101780-4112-48ff-a0f8-d5acff017705" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970584 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff101780-4112-48ff-a0f8-d5acff017705" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: E0128 15:36:50.970602 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f055c78c-148d-47e9-847c-99dfe92b152a" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970611 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="f055c78c-148d-47e9-847c-99dfe92b152a" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: E0128 15:36:50.970625 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99812e98-c4ee-439d-aea7-71a1ac4f02fa" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970633 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="99812e98-c4ee-439d-aea7-71a1ac4f02fa" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: E0128 15:36:50.970649 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f42737f-e000-4c42-8862-9b55c145364c" containerName="mariadb-database-create" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970657 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f42737f-e000-4c42-8862-9b55c145364c" containerName="mariadb-database-create" Jan 28 15:36:50 crc kubenswrapper[4959]: E0128 15:36:50.970676 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5766c03-911f-42fc-9e74-83e62a42d6e4" containerName="mariadb-database-create" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970682 4959 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b5766c03-911f-42fc-9e74-83e62a42d6e4" containerName="mariadb-database-create" Jan 28 15:36:50 crc kubenswrapper[4959]: E0128 15:36:50.970698 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c24cb3a0-a66a-4419-8427-a357605ab17a" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970707 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="c24cb3a0-a66a-4419-8427-a357605ab17a" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970867 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="c24cb3a0-a66a-4419-8427-a357605ab17a" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970888 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="99812e98-c4ee-439d-aea7-71a1ac4f02fa" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970900 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff101780-4112-48ff-a0f8-d5acff017705" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970911 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="f055c78c-148d-47e9-847c-99dfe92b152a" containerName="mariadb-account-create-update" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970919 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f42737f-e000-4c42-8862-9b55c145364c" containerName="mariadb-database-create" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.970931 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5766c03-911f-42fc-9e74-83e62a42d6e4" containerName="mariadb-database-create" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.971634 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.974451 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-9582l" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.977925 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 28 15:36:50 crc kubenswrapper[4959]: I0128 15:36:50.989494 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-l6hzt"] Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.069342 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-swdsq"] Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.071315 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.081523 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.085137 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-swdsq"] Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.089574 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-config-data\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.089634 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-db-sync-config-data\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.089879 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-combined-ca-bundle\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.089995 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77pzh\" (UniqueName: \"kubernetes.io/projected/2ac41b31-1ffd-4f8c-b693-7505c6894794-kube-api-access-77pzh\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.192177 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.192256 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-config-data\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.192302 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-db-sync-config-data\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.192689 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-combined-ca-bundle\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc 
kubenswrapper[4959]: I0128 15:36:51.192819 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77pzh\" (UniqueName: \"kubernetes.io/projected/2ac41b31-1ffd-4f8c-b693-7505c6894794-kube-api-access-77pzh\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.192866 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwhlx\" (UniqueName: \"kubernetes.io/projected/29192ab3-7f28-49be-a60e-b13906e49b10-kube-api-access-cwhlx\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.192938 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.193095 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.193149 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-config\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.193229 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.201218 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-db-sync-config-data\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.201352 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-config-data\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.201588 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-combined-ca-bundle\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt" Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 
15:36:51.215777 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77pzh\" (UniqueName: \"kubernetes.io/projected/2ac41b31-1ffd-4f8c-b693-7505c6894794-kube-api-access-77pzh\") pod \"glance-db-sync-l6hzt\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") " pod="openstack/glance-db-sync-l6hzt"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.295541 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.295730 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.295802 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-config\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.295852 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.295936 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.296131 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwhlx\" (UniqueName: \"kubernetes.io/projected/29192ab3-7f28-49be-a60e-b13906e49b10-kube-api-access-cwhlx\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.296731 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.297289 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.297552 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.298156 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.298834 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-l6hzt"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.304184 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-config\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.320206 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwhlx\" (UniqueName: \"kubernetes.io/projected/29192ab3-7f28-49be-a60e-b13906e49b10-kube-api-access-cwhlx\") pod \"dnsmasq-dns-77585f5f8c-swdsq\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.396422 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.710686 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a81258f3-e48f-44f0-93d9-02e58302683a","Type":"ContainerStarted","Data":"907381c2631594d5274485e1dc6b53466127fc092b1ee5d2d2b90bd749e8bc25"}
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.711186 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.741725 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ee1fc53a-3817-4c94-8bd6-569c089c02cb","Type":"ContainerStarted","Data":"80fec639aaab509bc85938cbdd655e5119ea6ca5a23b2f402b0f46019d3b6f2d"}
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.742953 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.791698 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=46.302098696 podStartE2EDuration="1m51.791664878s" podCreationTimestamp="2026-01-28 15:35:00 +0000 UTC" firstStartedPulling="2026-01-28 15:35:02.761151968 +0000 UTC m=+1086.207058351" lastFinishedPulling="2026-01-28 15:36:08.25071814 +0000 UTC m=+1151.696624533" observedRunningTime="2026-01-28 15:36:51.775444619 +0000 UTC m=+1195.221351012" watchObservedRunningTime="2026-01-28 15:36:51.791664878 +0000 UTC m=+1195.237571281"
Jan 28 15:36:51 crc kubenswrapper[4959]: I0128 15:36:51.842262 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=47.326902472 podStartE2EDuration="1m50.842237172s" podCreationTimestamp="2026-01-28 15:35:01 +0000 UTC" firstStartedPulling="2026-01-28 15:35:03.001041594 +0000 UTC m=+1086.446947987" lastFinishedPulling="2026-01-28 15:36:06.516376304 +0000 UTC m=+1149.962282687" observedRunningTime="2026-01-28 15:36:51.841447923 +0000 UTC m=+1195.287354316" watchObservedRunningTime="2026-01-28 15:36:51.842237172 +0000 UTC m=+1195.288143555"
Jan 28 15:36:52 crc kubenswrapper[4959]: I0128 15:36:52.405977 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-swdsq"]
Jan 28 15:36:52 crc kubenswrapper[4959]: W0128 15:36:52.413852 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29192ab3_7f28_49be_a60e_b13906e49b10.slice/crio-9eae874023f55d92e78794b5ad2e8f0cabcb6c1a7445eaed0a644bf87a1d827a WatchSource:0}: Error finding container 9eae874023f55d92e78794b5ad2e8f0cabcb6c1a7445eaed0a644bf87a1d827a: Status 404 returned error can't find the container with id 9eae874023f55d92e78794b5ad2e8f0cabcb6c1a7445eaed0a644bf87a1d827a
Jan 28 15:36:52 crc kubenswrapper[4959]: W0128 15:36:52.465438 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ac41b31_1ffd_4f8c_b693_7505c6894794.slice/crio-45a5703c55315464541e56cdd5132e4c691e7c890a83fc937cbb72ab32810d31 WatchSource:0}: Error finding container 45a5703c55315464541e56cdd5132e4c691e7c890a83fc937cbb72ab32810d31: Status 404 returned error can't find the container with id 45a5703c55315464541e56cdd5132e4c691e7c890a83fc937cbb72ab32810d31
Jan 28 15:36:52 crc kubenswrapper[4959]: I0128 15:36:52.474516 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-l6hzt"]
Jan 28 15:36:52 crc kubenswrapper[4959]: I0128 15:36:52.657198 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-bp544" podUID="93fc210e-4599-4436-b8e6-a20a8c5cd2b4" containerName="ovn-controller" probeResult="failure" output=<
Jan 28 15:36:52 crc kubenswrapper[4959]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 28 15:36:52 crc kubenswrapper[4959]: >
Jan 28 15:36:52 crc kubenswrapper[4959]: I0128 15:36:52.703721 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-zf2dp"
Jan 28 15:36:52 crc kubenswrapper[4959]: I0128 15:36:52.757976 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-l6hzt" event={"ID":"2ac41b31-1ffd-4f8c-b693-7505c6894794","Type":"ContainerStarted","Data":"45a5703c55315464541e56cdd5132e4c691e7c890a83fc937cbb72ab32810d31"}
Jan 28 15:36:52 crc kubenswrapper[4959]: I0128 15:36:52.761140 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" event={"ID":"29192ab3-7f28-49be-a60e-b13906e49b10","Type":"ContainerStarted","Data":"9eae874023f55d92e78794b5ad2e8f0cabcb6c1a7445eaed0a644bf87a1d827a"}
Jan 28 15:36:53 crc kubenswrapper[4959]: I0128 15:36:53.771824 4959 generic.go:334] "Generic (PLEG): container finished" podID="29192ab3-7f28-49be-a60e-b13906e49b10" containerID="3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a" exitCode=0
Jan 28 15:36:53 crc kubenswrapper[4959]: I0128 15:36:53.771885 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" event={"ID":"29192ab3-7f28-49be-a60e-b13906e49b10","Type":"ContainerDied","Data":"3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a"}
Jan 28 15:36:54 crc kubenswrapper[4959]: I0128 15:36:54.783499 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" event={"ID":"29192ab3-7f28-49be-a60e-b13906e49b10","Type":"ContainerStarted","Data":"3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49"}
Jan 28 15:36:54 crc kubenswrapper[4959]: I0128 15:36:54.783997 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:36:54 crc kubenswrapper[4959]: I0128 15:36:54.812526 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" podStartSLOduration=3.812503006 podStartE2EDuration="3.812503006s" podCreationTimestamp="2026-01-28 15:36:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:36:54.804109449 +0000 UTC m=+1198.250015862" watchObservedRunningTime="2026-01-28 15:36:54.812503006 +0000 UTC m=+1198.258409389"
Jan 28 15:36:57 crc kubenswrapper[4959]: I0128 15:36:57.633646 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-bp544" podUID="93fc210e-4599-4436-b8e6-a20a8c5cd2b4" containerName="ovn-controller" probeResult="failure" output=<
Jan 28 15:36:57 crc kubenswrapper[4959]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 28 15:36:57 crc kubenswrapper[4959]: >
Jan 28 15:36:57 crc kubenswrapper[4959]: I0128 15:36:57.647220 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-zf2dp"
Jan 28 15:36:57 crc kubenswrapper[4959]: I0128 15:36:57.900644 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-bp544-config-b5rlp"]
Jan 28 15:36:57 crc kubenswrapper[4959]: I0128 15:36:57.902196 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:57 crc kubenswrapper[4959]: I0128 15:36:57.907435 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Jan 28 15:36:57 crc kubenswrapper[4959]: I0128 15:36:57.919024 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-bp544-config-b5rlp"]
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.077526 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-scripts\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.077604 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-additional-scripts\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.077659 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.077680 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run-ovn\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.077710 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9wpw\" (UniqueName: \"kubernetes.io/projected/e500457c-6f13-4151-9491-1e995278fff6-kube-api-access-q9wpw\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.077883 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-log-ovn\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.179359 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.179416 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run-ovn\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.179450 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9wpw\" (UniqueName: \"kubernetes.io/projected/e500457c-6f13-4151-9491-1e995278fff6-kube-api-access-q9wpw\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.179496 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-log-ovn\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.179560 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-scripts\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.179583 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-additional-scripts\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.179965 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.179997 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run-ovn\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.180003 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-log-ovn\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.180366 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-additional-scripts\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.182645 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-scripts\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.213805 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9wpw\" (UniqueName: \"kubernetes.io/projected/e500457c-6f13-4151-9491-1e995278fff6-kube-api-access-q9wpw\") pod \"ovn-controller-bp544-config-b5rlp\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") " pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.256918 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.691732 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 15:36:58 crc kubenswrapper[4959]: I0128 15:36:58.691813 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 15:37:01 crc kubenswrapper[4959]: I0128 15:37:01.398428 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq"
Jan 28 15:37:01 crc kubenswrapper[4959]: I0128 15:37:01.488637 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-4spbq"]
Jan 28 15:37:01 crc kubenswrapper[4959]: I0128 15:37:01.488942 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-4spbq" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerName="dnsmasq-dns" containerID="cri-o://d2b39d4939b9560c4a01929d05a4f115e04f5c8d50579c775191e4acda8eb8d2" gracePeriod=10
Jan 28 15:37:02 crc kubenswrapper[4959]: I0128 15:37:02.027667 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="a81258f3-e48f-44f0-93d9-02e58302683a" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused"
Jan 28 15:37:02 crc kubenswrapper[4959]: I0128 15:37:02.412149 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="ee1fc53a-3817-4c94-8bd6-569c089c02cb" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused"
Jan 28 15:37:02 crc kubenswrapper[4959]: I0128 15:37:02.775504 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-bp544" podUID="93fc210e-4599-4436-b8e6-a20a8c5cd2b4" containerName="ovn-controller" probeResult="failure" output=<
Jan 28 15:37:02 crc kubenswrapper[4959]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 28 15:37:02 crc kubenswrapper[4959]: >
Jan 28 15:37:02 crc kubenswrapper[4959]: I0128 15:37:02.870284 4959 generic.go:334] "Generic (PLEG): container finished" podID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerID="d2b39d4939b9560c4a01929d05a4f115e04f5c8d50579c775191e4acda8eb8d2" exitCode=0
Jan 28 15:37:02 crc kubenswrapper[4959]: I0128 15:37:02.870351 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-4spbq" event={"ID":"2dd19028-8bc9-48e7-a361-8581aa9d5d29","Type":"ContainerDied","Data":"d2b39d4939b9560c4a01929d05a4f115e04f5c8d50579c775191e4acda8eb8d2"}
Jan 28 15:37:03 crc kubenswrapper[4959]: I0128 15:37:03.131897 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-4spbq" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.114:5353: connect: connection refused"
Jan 28 15:37:07 crc kubenswrapper[4959]: I0128 15:37:07.634031 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-bp544" podUID="93fc210e-4599-4436-b8e6-a20a8c5cd2b4" containerName="ovn-controller" probeResult="failure" output=<
Jan 28 15:37:07 crc kubenswrapper[4959]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 28 15:37:07 crc kubenswrapper[4959]: >
Jan 28 15:37:08 crc kubenswrapper[4959]: I0128 15:37:08.132290 4959 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-4spbq" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.114:5353: connect: connection refused"
Jan 28 15:37:09 crc kubenswrapper[4959]: E0128 15:37:09.197666 4959 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified"
Jan 28 15:37:09 crc kubenswrapper[4959]: E0128 15:37:09.198236 4959 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-77pzh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-l6hzt_openstack(2ac41b31-1ffd-4f8c-b693-7505c6894794): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 15:37:09 crc kubenswrapper[4959]: E0128 15:37:09.199459 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-l6hzt" podUID="2ac41b31-1ffd-4f8c-b693-7505c6894794"
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.492942 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.626847 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnbck\" (UniqueName: \"kubernetes.io/projected/2dd19028-8bc9-48e7-a361-8581aa9d5d29-kube-api-access-wnbck\") pod \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") "
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.627129 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-nb\") pod \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") "
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.627348 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-sb\") pod \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") "
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.627555 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-dns-svc\") pod \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") "
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.627710 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-config\") pod \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\" (UID: \"2dd19028-8bc9-48e7-a361-8581aa9d5d29\") "
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.629065 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-bp544-config-b5rlp"]
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.646703 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dd19028-8bc9-48e7-a361-8581aa9d5d29-kube-api-access-wnbck" (OuterVolumeSpecName: "kube-api-access-wnbck") pod "2dd19028-8bc9-48e7-a361-8581aa9d5d29" (UID: "2dd19028-8bc9-48e7-a361-8581aa9d5d29"). InnerVolumeSpecName "kube-api-access-wnbck". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.677249 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2dd19028-8bc9-48e7-a361-8581aa9d5d29" (UID: "2dd19028-8bc9-48e7-a361-8581aa9d5d29"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.681214 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2dd19028-8bc9-48e7-a361-8581aa9d5d29" (UID: "2dd19028-8bc9-48e7-a361-8581aa9d5d29"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.687391 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2dd19028-8bc9-48e7-a361-8581aa9d5d29" (UID: "2dd19028-8bc9-48e7-a361-8581aa9d5d29"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.688000 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-config" (OuterVolumeSpecName: "config") pod "2dd19028-8bc9-48e7-a361-8581aa9d5d29" (UID: "2dd19028-8bc9-48e7-a361-8581aa9d5d29"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.729729 4959 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.729769 4959 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.729780 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-config\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.729789 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnbck\" (UniqueName: \"kubernetes.io/projected/2dd19028-8bc9-48e7-a361-8581aa9d5d29-kube-api-access-wnbck\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.729801 4959 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2dd19028-8bc9-48e7-a361-8581aa9d5d29-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.960468 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-bp544-config-b5rlp" event={"ID":"e500457c-6f13-4151-9491-1e995278fff6","Type":"ContainerStarted","Data":"7a6e23622c3e247a1379ea5384675d839f42b6a6e9b6306edca5c0bdd9c06ec4"}
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.962402 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-4spbq" event={"ID":"2dd19028-8bc9-48e7-a361-8581aa9d5d29","Type":"ContainerDied","Data":"1ee7d042fe883cf5c7396756fcd1255a7be2a6c06a00b77493b1c4f840dfc985"}
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.962452 4959 scope.go:117] "RemoveContainer" containerID="d2b39d4939b9560c4a01929d05a4f115e04f5c8d50579c775191e4acda8eb8d2"
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.962626 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-4spbq"
Jan 28 15:37:09 crc kubenswrapper[4959]: E0128 15:37:09.964284 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-l6hzt" podUID="2ac41b31-1ffd-4f8c-b693-7505c6894794"
Jan 28 15:37:09 crc kubenswrapper[4959]: I0128 15:37:09.993789 4959 scope.go:117] "RemoveContainer" containerID="91cea2f5be084bdbff9356a0b39ef083492cf52c78f8d1c7db7232bd9850bfde"
Jan 28 15:37:10 crc kubenswrapper[4959]: E0128 15:37:10.011903 4959 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2dd19028_8bc9_48e7_a361_8581aa9d5d29.slice/crio-1ee7d042fe883cf5c7396756fcd1255a7be2a6c06a00b77493b1c4f840dfc985\": RecentStats: unable to find data in memory cache]"
Jan 28 15:37:10 crc kubenswrapper[4959]: I0128 15:37:10.016885 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-4spbq"]
Jan 28 15:37:10 crc kubenswrapper[4959]: I0128 15:37:10.028309 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-4spbq"]
Jan 28 15:37:10 crc kubenswrapper[4959]: I0128 15:37:10.598171 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" path="/var/lib/kubelet/pods/2dd19028-8bc9-48e7-a361-8581aa9d5d29/volumes"
Jan 28 15:37:10 crc kubenswrapper[4959]: I0128 15:37:10.974476 4959 generic.go:334] "Generic (PLEG): container finished" podID="e500457c-6f13-4151-9491-1e995278fff6" containerID="3f932d58d2ecae43f58dd3916c6ce6aa0e8febf7474ecf1f964f43b00ec4dcdc" exitCode=0
Jan 28 15:37:10 crc kubenswrapper[4959]: I0128 15:37:10.974550 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-bp544-config-b5rlp" event={"ID":"e500457c-6f13-4151-9491-1e995278fff6","Type":"ContainerDied","Data":"3f932d58d2ecae43f58dd3916c6ce6aa0e8febf7474ecf1f964f43b00ec4dcdc"}
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.026343 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.419853 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.631947 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-bp544-config-b5rlp"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.659760 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-4f1d-account-create-update-mh64s"]
Jan 28 15:37:12 crc kubenswrapper[4959]: E0128 15:37:12.660212 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerName="init"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.660227 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerName="init"
Jan 28 15:37:12 crc kubenswrapper[4959]: E0128 15:37:12.660248 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerName="dnsmasq-dns"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.660254 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerName="dnsmasq-dns"
Jan 28 15:37:12 crc kubenswrapper[4959]: E0128 15:37:12.660269 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e500457c-6f13-4151-9491-1e995278fff6" containerName="ovn-config"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.660275 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="e500457c-6f13-4151-9491-1e995278fff6" containerName="ovn-config"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.660442 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dd19028-8bc9-48e7-a361-8581aa9d5d29" containerName="dnsmasq-dns"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.660465 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="e500457c-6f13-4151-9491-1e995278fff6" containerName="ovn-config"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.661058 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.678602 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.688687 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-9q2mr"]
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.690020 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.693100 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-additional-scripts\") pod \"e500457c-6f13-4151-9491-1e995278fff6\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") "
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.693216 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run-ovn\") pod \"e500457c-6f13-4151-9491-1e995278fff6\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") "
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.693284 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9wpw\" (UniqueName: \"kubernetes.io/projected/e500457c-6f13-4151-9491-1e995278fff6-kube-api-access-q9wpw\") pod \"e500457c-6f13-4151-9491-1e995278fff6\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") "
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.693324 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run\") pod \"e500457c-6f13-4151-9491-1e995278fff6\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") "
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.693359 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-log-ovn\") pod \"e500457c-6f13-4151-9491-1e995278fff6\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") "
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.693398 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-scripts\") pod \"e500457c-6f13-4151-9491-1e995278fff6\" (UID: \"e500457c-6f13-4151-9491-1e995278fff6\") "
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.693746 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-operator-scripts\") pod \"cinder-db-create-9q2mr\" (UID: \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\") " pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.693802 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aff13d88-7544-4442-ab55-4c426ccca467-operator-scripts\") pod \"cinder-4f1d-account-create-update-mh64s\" (UID: \"aff13d88-7544-4442-ab55-4c426ccca467\") " pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.694024 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nzrp\" (UniqueName: \"kubernetes.io/projected/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-kube-api-access-7nzrp\") pod \"cinder-db-create-9q2mr\" (UID: \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\") " pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.694085 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtf4d\" (UniqueName: \"kubernetes.io/projected/aff13d88-7544-4442-ab55-4c426ccca467-kube-api-access-rtf4d\") pod \"cinder-4f1d-account-create-update-mh64s\" (UID: \"aff13d88-7544-4442-ab55-4c426ccca467\") " pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.694469 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run" (OuterVolumeSpecName: "var-run") pod "e500457c-6f13-4151-9491-1e995278fff6" (UID: "e500457c-6f13-4151-9491-1e995278fff6"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.694566 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e500457c-6f13-4151-9491-1e995278fff6" (UID: "e500457c-6f13-4151-9491-1e995278fff6"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.695069 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e500457c-6f13-4151-9491-1e995278fff6" (UID: "e500457c-6f13-4151-9491-1e995278fff6"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.696167 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e500457c-6f13-4151-9491-1e995278fff6" (UID: "e500457c-6f13-4151-9491-1e995278fff6"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.696736 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4f1d-account-create-update-mh64s"]
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.704319 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-scripts" (OuterVolumeSpecName: "scripts") pod "e500457c-6f13-4151-9491-1e995278fff6" (UID: "e500457c-6f13-4151-9491-1e995278fff6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.711239 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e500457c-6f13-4151-9491-1e995278fff6-kube-api-access-q9wpw" (OuterVolumeSpecName: "kube-api-access-q9wpw") pod "e500457c-6f13-4151-9491-1e995278fff6" (UID: "e500457c-6f13-4151-9491-1e995278fff6"). InnerVolumeSpecName "kube-api-access-q9wpw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.720494 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-bp544"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.741213 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-9q2mr"]
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.796512 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtf4d\" (UniqueName: \"kubernetes.io/projected/aff13d88-7544-4442-ab55-4c426ccca467-kube-api-access-rtf4d\") pod \"cinder-4f1d-account-create-update-mh64s\" (UID: \"aff13d88-7544-4442-ab55-4c426ccca467\") " pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.796604 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-operator-scripts\") pod \"cinder-db-create-9q2mr\" (UID: \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\") " pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.796637 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aff13d88-7544-4442-ab55-4c426ccca467-operator-scripts\") pod \"cinder-4f1d-account-create-update-mh64s\" (UID: \"aff13d88-7544-4442-ab55-4c426ccca467\") " pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.796728 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nzrp\" (UniqueName: \"kubernetes.io/projected/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-kube-api-access-7nzrp\") pod \"cinder-db-create-9q2mr\" (UID: \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\") " pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.799059 4959 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.799978 4959 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.800030 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9wpw\" (UniqueName: \"kubernetes.io/projected/e500457c-6f13-4151-9491-1e995278fff6-kube-api-access-q9wpw\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.800046 4959 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-run\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.800056 4959 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e500457c-6f13-4151-9491-1e995278fff6-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.800070 4959 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e500457c-6f13-4151-9491-1e995278fff6-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.800849 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-operator-scripts\") pod \"cinder-db-create-9q2mr\" (UID: \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\") " pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.800873 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aff13d88-7544-4442-ab55-4c426ccca467-operator-scripts\") pod \"cinder-4f1d-account-create-update-mh64s\" (UID: \"aff13d88-7544-4442-ab55-4c426ccca467\") " pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.844846 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-srp8f"]
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.850308 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtf4d\" (UniqueName: \"kubernetes.io/projected/aff13d88-7544-4442-ab55-4c426ccca467-kube-api-access-rtf4d\") pod \"cinder-4f1d-account-create-update-mh64s\" (UID: \"aff13d88-7544-4442-ab55-4c426ccca467\") " pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.868948 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nzrp\" (UniqueName: \"kubernetes.io/projected/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-kube-api-access-7nzrp\") pod \"cinder-db-create-9q2mr\" (UID: \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\") " pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.887187 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-srp8f"]
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.887351 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-srp8f"
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.982464 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-9zj97"]
Jan 28 15:37:12 crc kubenswrapper[4959]: I0128 15:37:12.984085 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9zj97"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.005308 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq2gb\" (UniqueName: \"kubernetes.io/projected/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-kube-api-access-qq2gb\") pod \"barbican-db-create-srp8f\" (UID: \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\") " pod="openstack/barbican-db-create-srp8f"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.005374 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-operator-scripts\") pod \"barbican-db-create-srp8f\" (UID: \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\") " pod="openstack/barbican-db-create-srp8f"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.005588 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4f1d-account-create-update-mh64s"
Need to start a new one" pod="openstack/cinder-4f1d-account-create-update-mh64s" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.009649 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-bp544-config-b5rlp" event={"ID":"e500457c-6f13-4151-9491-1e995278fff6","Type":"ContainerDied","Data":"7a6e23622c3e247a1379ea5384675d839f42b6a6e9b6306edca5c0bdd9c06ec4"} Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.009720 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a6e23622c3e247a1379ea5384675d839f42b6a6e9b6306edca5c0bdd9c06ec4" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.009820 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-bp544-config-b5rlp" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.024512 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-9zj97"] Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.061696 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-024e-account-create-update-99mmk"] Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.062631 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9q2mr" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.069332 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-024e-account-create-update-99mmk" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.082801 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.084917 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-024e-account-create-update-99mmk"] Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.106595 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-operator-scripts\") pod \"barbican-db-create-srp8f\" (UID: \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\") " pod="openstack/barbican-db-create-srp8f" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.106713 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-operator-scripts\") pod \"neutron-db-create-9zj97\" (UID: \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\") " pod="openstack/neutron-db-create-9zj97" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.106785 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqvjg\" (UniqueName: \"kubernetes.io/projected/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-kube-api-access-vqvjg\") pod \"neutron-db-create-9zj97\" (UID: \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\") " pod="openstack/neutron-db-create-9zj97" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.106833 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq2gb\" (UniqueName: \"kubernetes.io/projected/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-kube-api-access-qq2gb\") pod \"barbican-db-create-srp8f\" (UID: \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\") " pod="openstack/barbican-db-create-srp8f" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.108024 4959 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-operator-scripts\") pod \"barbican-db-create-srp8f\" (UID: \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\") " pod="openstack/barbican-db-create-srp8f" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.124427 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8467-account-create-update-tz2bg"] Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.126215 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8467-account-create-update-tz2bg" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.134338 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.151268 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8467-account-create-update-tz2bg"] Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.159589 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq2gb\" (UniqueName: \"kubernetes.io/projected/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-kube-api-access-qq2gb\") pod \"barbican-db-create-srp8f\" (UID: \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\") " pod="openstack/barbican-db-create-srp8f" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.208694 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d68b30f6-ed68-4a06-987d-38bb6853c206-operator-scripts\") pod \"neutron-8467-account-create-update-tz2bg\" (UID: \"d68b30f6-ed68-4a06-987d-38bb6853c206\") " pod="openstack/neutron-8467-account-create-update-tz2bg" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.208768 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqvjg\" (UniqueName: \"kubernetes.io/projected/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-kube-api-access-vqvjg\") pod \"neutron-db-create-9zj97\" (UID: \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\") " pod="openstack/neutron-db-create-9zj97" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.208813 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e92578e-1d79-49bd-9144-76a8450afc9a-operator-scripts\") pod \"barbican-024e-account-create-update-99mmk\" (UID: \"6e92578e-1d79-49bd-9144-76a8450afc9a\") " pod="openstack/barbican-024e-account-create-update-99mmk" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.208898 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59vp6\" (UniqueName: \"kubernetes.io/projected/d68b30f6-ed68-4a06-987d-38bb6853c206-kube-api-access-59vp6\") pod \"neutron-8467-account-create-update-tz2bg\" (UID: \"d68b30f6-ed68-4a06-987d-38bb6853c206\") " pod="openstack/neutron-8467-account-create-update-tz2bg" Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.208936 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh52j\" (UniqueName: \"kubernetes.io/projected/6e92578e-1d79-49bd-9144-76a8450afc9a-kube-api-access-dh52j\") pod \"barbican-024e-account-create-update-99mmk\" (UID: \"6e92578e-1d79-49bd-9144-76a8450afc9a\") " pod="openstack/barbican-024e-account-create-update-99mmk" Jan 28 15:37:13 
crc kubenswrapper[4959]: I0128 15:37:13.208961 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-operator-scripts\") pod \"neutron-db-create-9zj97\" (UID: \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\") " pod="openstack/neutron-db-create-9zj97"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.210659 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-operator-scripts\") pod \"neutron-db-create-9zj97\" (UID: \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\") " pod="openstack/neutron-db-create-9zj97"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.229972 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqvjg\" (UniqueName: \"kubernetes.io/projected/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-kube-api-access-vqvjg\") pod \"neutron-db-create-9zj97\" (UID: \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\") " pod="openstack/neutron-db-create-9zj97"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.263513 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-srp8f"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.314618 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59vp6\" (UniqueName: \"kubernetes.io/projected/d68b30f6-ed68-4a06-987d-38bb6853c206-kube-api-access-59vp6\") pod \"neutron-8467-account-create-update-tz2bg\" (UID: \"d68b30f6-ed68-4a06-987d-38bb6853c206\") " pod="openstack/neutron-8467-account-create-update-tz2bg"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.314693 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh52j\" (UniqueName: \"kubernetes.io/projected/6e92578e-1d79-49bd-9144-76a8450afc9a-kube-api-access-dh52j\") pod \"barbican-024e-account-create-update-99mmk\" (UID: \"6e92578e-1d79-49bd-9144-76a8450afc9a\") " pod="openstack/barbican-024e-account-create-update-99mmk"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.314754 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d68b30f6-ed68-4a06-987d-38bb6853c206-operator-scripts\") pod \"neutron-8467-account-create-update-tz2bg\" (UID: \"d68b30f6-ed68-4a06-987d-38bb6853c206\") " pod="openstack/neutron-8467-account-create-update-tz2bg"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.314789 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e92578e-1d79-49bd-9144-76a8450afc9a-operator-scripts\") pod \"barbican-024e-account-create-update-99mmk\" (UID: \"6e92578e-1d79-49bd-9144-76a8450afc9a\") " pod="openstack/barbican-024e-account-create-update-99mmk"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.315704 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e92578e-1d79-49bd-9144-76a8450afc9a-operator-scripts\") pod \"barbican-024e-account-create-update-99mmk\" (UID: \"6e92578e-1d79-49bd-9144-76a8450afc9a\") " pod="openstack/barbican-024e-account-create-update-99mmk"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.316679 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9zj97"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.316833 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d68b30f6-ed68-4a06-987d-38bb6853c206-operator-scripts\") pod \"neutron-8467-account-create-update-tz2bg\" (UID: \"d68b30f6-ed68-4a06-987d-38bb6853c206\") " pod="openstack/neutron-8467-account-create-update-tz2bg"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.335216 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59vp6\" (UniqueName: \"kubernetes.io/projected/d68b30f6-ed68-4a06-987d-38bb6853c206-kube-api-access-59vp6\") pod \"neutron-8467-account-create-update-tz2bg\" (UID: \"d68b30f6-ed68-4a06-987d-38bb6853c206\") " pod="openstack/neutron-8467-account-create-update-tz2bg"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.347476 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh52j\" (UniqueName: \"kubernetes.io/projected/6e92578e-1d79-49bd-9144-76a8450afc9a-kube-api-access-dh52j\") pod \"barbican-024e-account-create-update-99mmk\" (UID: \"6e92578e-1d79-49bd-9144-76a8450afc9a\") " pod="openstack/barbican-024e-account-create-update-99mmk"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.410895 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-024e-account-create-update-99mmk"
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.449934 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4f1d-account-create-update-mh64s"]
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.462833 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8467-account-create-update-tz2bg"
Jan 28 15:37:13 crc kubenswrapper[4959]: W0128 15:37:13.470922 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaff13d88_7544_4442_ab55_4c426ccca467.slice/crio-3199c0ed43ba3dee659949a7ab103c9909b5ae46e81cd847563654cf7ec699c3 WatchSource:0}: Error finding container 3199c0ed43ba3dee659949a7ab103c9909b5ae46e81cd847563654cf7ec699c3: Status 404 returned error can't find the container with id 3199c0ed43ba3dee659949a7ab103c9909b5ae46e81cd847563654cf7ec699c3
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.668706 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-srp8f"]
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.761253 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-9q2mr"]
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.881968 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-bp544-config-b5rlp"]
Jan 28 15:37:13 crc kubenswrapper[4959]: I0128 15:37:13.896568 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-bp544-config-b5rlp"]
Jan 28 15:37:14 crc kubenswrapper[4959]: I0128 15:37:14.023616 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9q2mr" event={"ID":"c1ba29f6-1e73-4862-ab3f-8590465ff3d6","Type":"ContainerStarted","Data":"019c24d57ca2be640bb2b690144af6cb0f63c54c88d52489a333187d1cbaa443"}
Jan 28 15:37:14 crc kubenswrapper[4959]: I0128 15:37:14.025887 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-srp8f" event={"ID":"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f","Type":"ContainerStarted","Data":"a17d5e8451771fba1ade4bdf83e3828efc3de06a8820db2ab4c4a4a7d320f9b6"}
Jan 28 15:37:14 crc kubenswrapper[4959]: I0128 15:37:14.028022 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4f1d-account-create-update-mh64s" event={"ID":"aff13d88-7544-4442-ab55-4c426ccca467","Type":"ContainerStarted","Data":"3199c0ed43ba3dee659949a7ab103c9909b5ae46e81cd847563654cf7ec699c3"}
Jan 28 15:37:14 crc kubenswrapper[4959]: I0128 15:37:14.051465 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-9zj97"]
Jan 28 15:37:14 crc kubenswrapper[4959]: I0128 15:37:14.103805 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8467-account-create-update-tz2bg"]
Jan 28 15:37:14 crc kubenswrapper[4959]: I0128 15:37:14.207459 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-024e-account-create-update-99mmk"]
Jan 28 15:37:14 crc kubenswrapper[4959]: W0128 15:37:14.220969 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e92578e_1d79_49bd_9144_76a8450afc9a.slice/crio-67c238b7976620b9c6ac1e27f66142854f46b019513705d66e18d470f67cc61e WatchSource:0}: Error finding container 67c238b7976620b9c6ac1e27f66142854f46b019513705d66e18d470f67cc61e: Status 404 returned error can't find the container with id 67c238b7976620b9c6ac1e27f66142854f46b019513705d66e18d470f67cc61e
Jan 28 15:37:14 crc kubenswrapper[4959]: I0128 15:37:14.608351 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e500457c-6f13-4151-9491-1e995278fff6" path="/var/lib/kubelet/pods/e500457c-6f13-4151-9491-1e995278fff6/volumes"
Jan 28 15:37:15 crc kubenswrapper[4959]: I0128 15:37:15.053814 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-024e-account-create-update-99mmk" event={"ID":"6e92578e-1d79-49bd-9144-76a8450afc9a","Type":"ContainerStarted","Data":"67c238b7976620b9c6ac1e27f66142854f46b019513705d66e18d470f67cc61e"}
Jan 28 15:37:15 crc kubenswrapper[4959]: I0128 15:37:15.054663 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9zj97" event={"ID":"3c55bb4f-197b-43bf-9f6b-917aa48aa16a","Type":"ContainerStarted","Data":"d5559665639b241242db6d846106edca87df8d9d9a5c394e88cfd7204d69aaff"}
Jan 28 15:37:15 crc kubenswrapper[4959]: I0128 15:37:15.055491 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8467-account-create-update-tz2bg" event={"ID":"d68b30f6-ed68-4a06-987d-38bb6853c206","Type":"ContainerStarted","Data":"f89cfede068f1efb484117a640557848ace88047c062742462324c4ec4d1701b"}
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.074143 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9zj97" event={"ID":"3c55bb4f-197b-43bf-9f6b-917aa48aa16a","Type":"ContainerStarted","Data":"e1e40a678bd8fab77f332b947a341a28c9c526e59a15240c7d1c340d27b6fb88"}
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.075355 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9q2mr" event={"ID":"c1ba29f6-1e73-4862-ab3f-8590465ff3d6","Type":"ContainerStarted","Data":"a71bf81e95dfab1bab7d87392e376f64df726b3d04f48c24e1c8f322448e11a8"}
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.078293 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-srp8f" event={"ID":"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f","Type":"ContainerStarted","Data":"086d4316e9880557f8be658fb04ff2e7aa6d43ee8c792828891836814c477e6f"}
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.079986 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4f1d-account-create-update-mh64s" event={"ID":"aff13d88-7544-4442-ab55-4c426ccca467","Type":"ContainerStarted","Data":"7bc6515ab908db01f9c5cc2cef7ad2ef5e62283b5f60d897a94ec3238322c9b4"}
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.081651 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8467-account-create-update-tz2bg" event={"ID":"d68b30f6-ed68-4a06-987d-38bb6853c206","Type":"ContainerStarted","Data":"fea624a18b30980ef0c4be2fdc1f1fd21b5212ee9079e82fd370f5c06eda676c"}
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.083548 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-024e-account-create-update-99mmk" event={"ID":"6e92578e-1d79-49bd-9144-76a8450afc9a","Type":"ContainerStarted","Data":"aa3ab895593c8ec242aac0140ae28c95b4075609fbe3e4d828ef09986d6c0cc6"}
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.099709 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-9zj97" podStartSLOduration=5.099684841 podStartE2EDuration="5.099684841s" podCreationTimestamp="2026-01-28 15:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:37:17.091042918 +0000 UTC m=+1220.536949301" watchObservedRunningTime="2026-01-28 15:37:17.099684841 +0000 UTC m=+1220.545591214"
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.116024 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-9q2mr" podStartSLOduration=5.115997862 podStartE2EDuration="5.115997862s" podCreationTimestamp="2026-01-28 15:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:37:17.11142932 +0000 UTC m=+1220.557335713" watchObservedRunningTime="2026-01-28 15:37:17.115997862 +0000 UTC m=+1220.561904245"
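The pod_startup_latency_tracker entries above encode podStartSLOduration as the gap between watchObservedRunningTime and podCreationTimestamp. A quick sanity check on the cinder-db-create-9q2mr entry, as a minimal Python sketch (values copied from the log; the kubelet's nanosecond timestamps are trimmed to microseconds, which is all %f parses):

from datetime import datetime

fmt = "%Y-%m-%d %H:%M:%S.%f"
# podCreationTimestamp and watchObservedRunningTime from the entry above
created = datetime.strptime("2026-01-28 15:37:12.000000", fmt)
observed = datetime.strptime("2026-01-28 15:37:17.115997", fmt)
print((observed - created).total_seconds())  # 5.115997 ~= podStartSLOduration=5.115997862

The same arithmetic reproduces the SLO durations for the other create/update jobs in this batch.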
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.154838 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-4f1d-account-create-update-mh64s" podStartSLOduration=5.154808407 podStartE2EDuration="5.154808407s" podCreationTimestamp="2026-01-28 15:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:37:17.132293763 +0000 UTC m=+1220.578200136" watchObservedRunningTime="2026-01-28 15:37:17.154808407 +0000 UTC m=+1220.600714790"
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.156075 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-024e-account-create-update-99mmk" podStartSLOduration=5.156066668 podStartE2EDuration="5.156066668s" podCreationTimestamp="2026-01-28 15:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:37:17.147972789 +0000 UTC m=+1220.593879182" watchObservedRunningTime="2026-01-28 15:37:17.156066668 +0000 UTC m=+1220.601973041"
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.181158 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-srp8f" podStartSLOduration=5.181139675 podStartE2EDuration="5.181139675s" podCreationTimestamp="2026-01-28 15:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:37:17.179062133 +0000 UTC m=+1220.624968526" watchObservedRunningTime="2026-01-28 15:37:17.181139675 +0000 UTC m=+1220.627046058"
Jan 28 15:37:17 crc kubenswrapper[4959]: I0128 15:37:17.201781 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8467-account-create-update-tz2bg" podStartSLOduration=4.201758352 podStartE2EDuration="4.201758352s" podCreationTimestamp="2026-01-28 15:37:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:37:17.193023267 +0000 UTC m=+1220.638929660" watchObservedRunningTime="2026-01-28 15:37:17.201758352 +0000 UTC m=+1220.647664735"
Jan 28 15:37:19 crc kubenswrapper[4959]: I0128 15:37:19.107273 4959 generic.go:334] "Generic (PLEG): container finished" podID="c1ba29f6-1e73-4862-ab3f-8590465ff3d6" containerID="a71bf81e95dfab1bab7d87392e376f64df726b3d04f48c24e1c8f322448e11a8" exitCode=0
Jan 28 15:37:19 crc kubenswrapper[4959]: I0128 15:37:19.107404 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9q2mr" event={"ID":"c1ba29f6-1e73-4862-ab3f-8590465ff3d6","Type":"ContainerDied","Data":"a71bf81e95dfab1bab7d87392e376f64df726b3d04f48c24e1c8f322448e11a8"}
Jan 28 15:37:19 crc kubenswrapper[4959]: I0128 15:37:19.110654 4959 generic.go:334] "Generic (PLEG): container finished" podID="5f6a647b-8434-4a66-b8b3-ab8e3e7a006f" containerID="086d4316e9880557f8be658fb04ff2e7aa6d43ee8c792828891836814c477e6f" exitCode=0
Jan 28 15:37:19 crc kubenswrapper[4959]: I0128 15:37:19.110712 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-srp8f" event={"ID":"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f","Type":"ContainerDied","Data":"086d4316e9880557f8be658fb04ff2e7aa6d43ee8c792828891836814c477e6f"}
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.518746 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.525295 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-srp8f"
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.676997 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-operator-scripts\") pod \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\" (UID: \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\") "
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.677239 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nzrp\" (UniqueName: \"kubernetes.io/projected/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-kube-api-access-7nzrp\") pod \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\" (UID: \"c1ba29f6-1e73-4862-ab3f-8590465ff3d6\") "
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.677292 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qq2gb\" (UniqueName: \"kubernetes.io/projected/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-kube-api-access-qq2gb\") pod \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\" (UID: \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\") "
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.677343 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-operator-scripts\") pod \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\" (UID: \"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f\") "
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.678456 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5f6a647b-8434-4a66-b8b3-ab8e3e7a006f" (UID: "5f6a647b-8434-4a66-b8b3-ab8e3e7a006f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.678455 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c1ba29f6-1e73-4862-ab3f-8590465ff3d6" (UID: "c1ba29f6-1e73-4862-ab3f-8590465ff3d6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.685319 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-kube-api-access-qq2gb" (OuterVolumeSpecName: "kube-api-access-qq2gb") pod "5f6a647b-8434-4a66-b8b3-ab8e3e7a006f" (UID: "5f6a647b-8434-4a66-b8b3-ab8e3e7a006f"). InnerVolumeSpecName "kube-api-access-qq2gb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.685890 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-kube-api-access-7nzrp" (OuterVolumeSpecName: "kube-api-access-7nzrp") pod "c1ba29f6-1e73-4862-ab3f-8590465ff3d6" (UID: "c1ba29f6-1e73-4862-ab3f-8590465ff3d6"). InnerVolumeSpecName "kube-api-access-7nzrp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.779177 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nzrp\" (UniqueName: \"kubernetes.io/projected/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-kube-api-access-7nzrp\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.779224 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qq2gb\" (UniqueName: \"kubernetes.io/projected/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-kube-api-access-qq2gb\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.779238 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:20 crc kubenswrapper[4959]: I0128 15:37:20.779248 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1ba29f6-1e73-4862-ab3f-8590465ff3d6-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:21 crc kubenswrapper[4959]: I0128 15:37:21.134433 4959 generic.go:334] "Generic (PLEG): container finished" podID="3c55bb4f-197b-43bf-9f6b-917aa48aa16a" containerID="e1e40a678bd8fab77f332b947a341a28c9c526e59a15240c7d1c340d27b6fb88" exitCode=0
Jan 28 15:37:21 crc kubenswrapper[4959]: I0128 15:37:21.134538 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9zj97" event={"ID":"3c55bb4f-197b-43bf-9f6b-917aa48aa16a","Type":"ContainerDied","Data":"e1e40a678bd8fab77f332b947a341a28c9c526e59a15240c7d1c340d27b6fb88"}
Jan 28 15:37:21 crc kubenswrapper[4959]: I0128 15:37:21.137956 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9q2mr"
Jan 28 15:37:21 crc kubenswrapper[4959]: I0128 15:37:21.137931 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9q2mr" event={"ID":"c1ba29f6-1e73-4862-ab3f-8590465ff3d6","Type":"ContainerDied","Data":"019c24d57ca2be640bb2b690144af6cb0f63c54c88d52489a333187d1cbaa443"}
Jan 28 15:37:21 crc kubenswrapper[4959]: I0128 15:37:21.138164 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="019c24d57ca2be640bb2b690144af6cb0f63c54c88d52489a333187d1cbaa443"
Jan 28 15:37:21 crc kubenswrapper[4959]: I0128 15:37:21.141591 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-srp8f" event={"ID":"5f6a647b-8434-4a66-b8b3-ab8e3e7a006f","Type":"ContainerDied","Data":"a17d5e8451771fba1ade4bdf83e3828efc3de06a8820db2ab4c4a4a7d320f9b6"}
Jan 28 15:37:21 crc kubenswrapper[4959]: I0128 15:37:21.141636 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a17d5e8451771fba1ade4bdf83e3828efc3de06a8820db2ab4c4a4a7d320f9b6"
Jan 28 15:37:21 crc kubenswrapper[4959]: I0128 15:37:21.141699 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-srp8f"
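Each completed job pod above is torn down through the same three reconciler stages: "operationExecutor.UnmountVolume started" (reconciler_common.go:159), "UnmountVolume.TearDown succeeded" (operation_generator.go:803), and "Volume detached" (reconciler_common.go:293). A rough Python filter that replays that sequence for one pod UID from a saved copy of this log (the filename and the phase markers are assumptions drawn from the message texts above):

# Replay the unmount lifecycle for one pod UID from a saved kubelet log.
UID = "c1ba29f6-1e73-4862-ab3f-8590465ff3d6"  # cinder-db-create-9q2mr
PHASES = [
    ("operationExecutor.UnmountVolume started", "1. unmount requested"),
    ("UnmountVolume.TearDown succeeded", "2. teardown succeeded"),
    ("Volume detached", "3. detached"),
]

with open("kubelet.log") as log:  # assumed path
    for line in log:
        if UID not in line:
            continue
        for marker, label in PHASES:
            if marker in line:
                print(label, "|", line.strip()[:100])
                break

For the pod above this prints two interleaved volume lifecycles (operator-scripts and kube-api-access-7nzrp), each passing through the three stages in order.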
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.155553 4959 generic.go:334] "Generic (PLEG): container finished" podID="6e92578e-1d79-49bd-9144-76a8450afc9a" containerID="aa3ab895593c8ec242aac0140ae28c95b4075609fbe3e4d828ef09986d6c0cc6" exitCode=0
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.155637 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-024e-account-create-update-99mmk" event={"ID":"6e92578e-1d79-49bd-9144-76a8450afc9a","Type":"ContainerDied","Data":"aa3ab895593c8ec242aac0140ae28c95b4075609fbe3e4d828ef09986d6c0cc6"}
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.159030 4959 generic.go:334] "Generic (PLEG): container finished" podID="aff13d88-7544-4442-ab55-4c426ccca467" containerID="7bc6515ab908db01f9c5cc2cef7ad2ef5e62283b5f60d897a94ec3238322c9b4" exitCode=0
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.159098 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4f1d-account-create-update-mh64s" event={"ID":"aff13d88-7544-4442-ab55-4c426ccca467","Type":"ContainerDied","Data":"7bc6515ab908db01f9c5cc2cef7ad2ef5e62283b5f60d897a94ec3238322c9b4"}
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.160940 4959 generic.go:334] "Generic (PLEG): container finished" podID="d68b30f6-ed68-4a06-987d-38bb6853c206" containerID="fea624a18b30980ef0c4be2fdc1f1fd21b5212ee9079e82fd370f5c06eda676c" exitCode=0
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.161009 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8467-account-create-update-tz2bg" event={"ID":"d68b30f6-ed68-4a06-987d-38bb6853c206","Type":"ContainerDied","Data":"fea624a18b30980ef0c4be2fdc1f1fd21b5212ee9079e82fd370f5c06eda676c"}
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.455833 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9zj97"
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.614674 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqvjg\" (UniqueName: \"kubernetes.io/projected/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-kube-api-access-vqvjg\") pod \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\" (UID: \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\") "
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.614784 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-operator-scripts\") pod \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\" (UID: \"3c55bb4f-197b-43bf-9f6b-917aa48aa16a\") "
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.616236 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3c55bb4f-197b-43bf-9f6b-917aa48aa16a" (UID: "3c55bb4f-197b-43bf-9f6b-917aa48aa16a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.625972 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-kube-api-access-vqvjg" (OuterVolumeSpecName: "kube-api-access-vqvjg") pod "3c55bb4f-197b-43bf-9f6b-917aa48aa16a" (UID: "3c55bb4f-197b-43bf-9f6b-917aa48aa16a"). InnerVolumeSpecName "kube-api-access-vqvjg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.717355 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqvjg\" (UniqueName: \"kubernetes.io/projected/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-kube-api-access-vqvjg\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:22 crc kubenswrapper[4959]: I0128 15:37:22.717403 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c55bb4f-197b-43bf-9f6b-917aa48aa16a-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.181573 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9zj97" event={"ID":"3c55bb4f-197b-43bf-9f6b-917aa48aa16a","Type":"ContainerDied","Data":"d5559665639b241242db6d846106edca87df8d9d9a5c394e88cfd7204d69aaff"}
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.182220 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5559665639b241242db6d846106edca87df8d9d9a5c394e88cfd7204d69aaff"
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.182053 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9zj97"
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.695535 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-024e-account-create-update-99mmk"
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.703298 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.722479 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8467-account-create-update-tz2bg"
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.854251 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dh52j\" (UniqueName: \"kubernetes.io/projected/6e92578e-1d79-49bd-9144-76a8450afc9a-kube-api-access-dh52j\") pod \"6e92578e-1d79-49bd-9144-76a8450afc9a\" (UID: \"6e92578e-1d79-49bd-9144-76a8450afc9a\") "
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.854695 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e92578e-1d79-49bd-9144-76a8450afc9a-operator-scripts\") pod \"6e92578e-1d79-49bd-9144-76a8450afc9a\" (UID: \"6e92578e-1d79-49bd-9144-76a8450afc9a\") "
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.854806 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59vp6\" (UniqueName: \"kubernetes.io/projected/d68b30f6-ed68-4a06-987d-38bb6853c206-kube-api-access-59vp6\") pod \"d68b30f6-ed68-4a06-987d-38bb6853c206\" (UID: \"d68b30f6-ed68-4a06-987d-38bb6853c206\") "
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.854879 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d68b30f6-ed68-4a06-987d-38bb6853c206-operator-scripts\") pod \"d68b30f6-ed68-4a06-987d-38bb6853c206\" (UID: \"d68b30f6-ed68-4a06-987d-38bb6853c206\") "
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.854919 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtf4d\" (UniqueName: \"kubernetes.io/projected/aff13d88-7544-4442-ab55-4c426ccca467-kube-api-access-rtf4d\") pod \"aff13d88-7544-4442-ab55-4c426ccca467\" (UID: \"aff13d88-7544-4442-ab55-4c426ccca467\") "
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.854957 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aff13d88-7544-4442-ab55-4c426ccca467-operator-scripts\") pod \"aff13d88-7544-4442-ab55-4c426ccca467\" (UID: \"aff13d88-7544-4442-ab55-4c426ccca467\") "
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.856237 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d68b30f6-ed68-4a06-987d-38bb6853c206-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d68b30f6-ed68-4a06-987d-38bb6853c206" (UID: "d68b30f6-ed68-4a06-987d-38bb6853c206"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.856251 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aff13d88-7544-4442-ab55-4c426ccca467-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aff13d88-7544-4442-ab55-4c426ccca467" (UID: "aff13d88-7544-4442-ab55-4c426ccca467"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.856736 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e92578e-1d79-49bd-9144-76a8450afc9a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6e92578e-1d79-49bd-9144-76a8450afc9a" (UID: "6e92578e-1d79-49bd-9144-76a8450afc9a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.862080 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e92578e-1d79-49bd-9144-76a8450afc9a-kube-api-access-dh52j" (OuterVolumeSpecName: "kube-api-access-dh52j") pod "6e92578e-1d79-49bd-9144-76a8450afc9a" (UID: "6e92578e-1d79-49bd-9144-76a8450afc9a"). InnerVolumeSpecName "kube-api-access-dh52j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.862794 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aff13d88-7544-4442-ab55-4c426ccca467-kube-api-access-rtf4d" (OuterVolumeSpecName: "kube-api-access-rtf4d") pod "aff13d88-7544-4442-ab55-4c426ccca467" (UID: "aff13d88-7544-4442-ab55-4c426ccca467"). InnerVolumeSpecName "kube-api-access-rtf4d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.862868 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d68b30f6-ed68-4a06-987d-38bb6853c206-kube-api-access-59vp6" (OuterVolumeSpecName: "kube-api-access-59vp6") pod "d68b30f6-ed68-4a06-987d-38bb6853c206" (UID: "d68b30f6-ed68-4a06-987d-38bb6853c206"). InnerVolumeSpecName "kube-api-access-59vp6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.957033 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dh52j\" (UniqueName: \"kubernetes.io/projected/6e92578e-1d79-49bd-9144-76a8450afc9a-kube-api-access-dh52j\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.957077 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e92578e-1d79-49bd-9144-76a8450afc9a-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.957090 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59vp6\" (UniqueName: \"kubernetes.io/projected/d68b30f6-ed68-4a06-987d-38bb6853c206-kube-api-access-59vp6\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.957100 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d68b30f6-ed68-4a06-987d-38bb6853c206-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.957121 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rtf4d\" (UniqueName: \"kubernetes.io/projected/aff13d88-7544-4442-ab55-4c426ccca467-kube-api-access-rtf4d\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:23 crc kubenswrapper[4959]: I0128 15:37:23.957130 4959 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aff13d88-7544-4442-ab55-4c426ccca467-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.201326 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8467-account-create-update-tz2bg" event={"ID":"d68b30f6-ed68-4a06-987d-38bb6853c206","Type":"ContainerDied","Data":"f89cfede068f1efb484117a640557848ace88047c062742462324c4ec4d1701b"}
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.201378 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f89cfede068f1efb484117a640557848ace88047c062742462324c4ec4d1701b"
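Container lifetimes can be read straight off the PLEG lines above: each container ID appears once in a ContainerStarted event and again in a ContainerDied event (the 64-hex Data field). A Python sketch that pairs them to estimate per-container runtimes (the filename is an assumption, and the parser keys off the kubelet clock I0128 HH:MM:SS.ffffff without handling date rollover):

import re
from datetime import datetime

# Pair each container ID's Started/Died PLEG events to get a rough runtime.
evt = re.compile(
    r'I0128 (\d{2}:\d{2}:\d{2}\.\d{6}).*"Type":"Container(Started|Died)",'
    r'"Data":"([0-9a-f]{64})"'
)

started = {}
with open("kubelet.log") as log:  # assumed path
    for line in log:
        m = evt.search(line)
        if not m:
            continue
        ts = datetime.strptime(m.group(1), "%H:%M:%S.%f")
        cid = m.group(3)
        if m.group(2) == "Started":
            started[cid] = ts
        elif cid in started:
            print(cid[:12], (ts - started.pop(cid)).total_seconds(), "s")

For example, container a71bf81e… starts at 15:37:17.075355 and finishes at 15:37:19.107273, so the cinder-db-create job ran for roughly two seconds.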
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.201412 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8467-account-create-update-tz2bg"
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.206272 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-024e-account-create-update-99mmk" event={"ID":"6e92578e-1d79-49bd-9144-76a8450afc9a","Type":"ContainerDied","Data":"67c238b7976620b9c6ac1e27f66142854f46b019513705d66e18d470f67cc61e"}
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.206313 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67c238b7976620b9c6ac1e27f66142854f46b019513705d66e18d470f67cc61e"
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.206553 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-024e-account-create-update-99mmk"
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.229256 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4f1d-account-create-update-mh64s" event={"ID":"aff13d88-7544-4442-ab55-4c426ccca467","Type":"ContainerDied","Data":"3199c0ed43ba3dee659949a7ab103c9909b5ae46e81cd847563654cf7ec699c3"}
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.229305 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3199c0ed43ba3dee659949a7ab103c9909b5ae46e81cd847563654cf7ec699c3"
Jan 28 15:37:24 crc kubenswrapper[4959]: I0128 15:37:24.229459 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4f1d-account-create-update-mh64s"
Jan 28 15:37:25 crc kubenswrapper[4959]: I0128 15:37:25.240948 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-l6hzt" event={"ID":"2ac41b31-1ffd-4f8c-b693-7505c6894794","Type":"ContainerStarted","Data":"92761edae36e3513368ca65a4a9724a0761ec368d6f1f791bf5c558f35decd0b"}
Jan 28 15:37:25 crc kubenswrapper[4959]: I0128 15:37:25.263807 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-l6hzt" podStartSLOduration=3.611631599 podStartE2EDuration="35.263781839s" podCreationTimestamp="2026-01-28 15:36:50 +0000 UTC" firstStartedPulling="2026-01-28 15:36:52.469782114 +0000 UTC m=+1195.915688497" lastFinishedPulling="2026-01-28 15:37:24.121932354 +0000 UTC m=+1227.567838737" observedRunningTime="2026-01-28 15:37:25.261969125 +0000 UTC m=+1228.707875508" watchObservedRunningTime="2026-01-28 15:37:25.263781839 +0000 UTC m=+1228.709688222"
Jan 28 15:37:28 crc kubenswrapper[4959]: I0128 15:37:28.689854 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 15:37:28 crc kubenswrapper[4959]: I0128 15:37:28.691040 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 15:37:28 crc kubenswrapper[4959]: I0128 15:37:28.691126 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw"
Jan 28 15:37:28 crc kubenswrapper[4959]: I0128 15:37:28.692095 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b44c8493f2114ae61783ae8e4da030e783bddb8b9595a72c10f90d99f7517fe3"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 15:37:28 crc kubenswrapper[4959]: I0128 15:37:28.692196 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://b44c8493f2114ae61783ae8e4da030e783bddb8b9595a72c10f90d99f7517fe3" gracePeriod=600
Jan 28 15:37:29 crc kubenswrapper[4959]: I0128 15:37:29.299063 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="b44c8493f2114ae61783ae8e4da030e783bddb8b9595a72c10f90d99f7517fe3" exitCode=0
Jan 28 15:37:29 crc kubenswrapper[4959]: I0128 15:37:29.299232 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"b44c8493f2114ae61783ae8e4da030e783bddb8b9595a72c10f90d99f7517fe3"}
Jan 28 15:37:29 crc kubenswrapper[4959]: I0128 15:37:29.299587 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"f37a96621a701b52e2c4c9fe1e3769cc979d96d65301975c4af562d003635b74"}
Jan 28 15:37:29 crc kubenswrapper[4959]: I0128 15:37:29.299623 4959 scope.go:117] "RemoveContainer" containerID="8e575dc9c25dda36f1b0b8c84111a641d3564e8c98ab1a5fe36fe70b774dfdfc"
Jan 28 15:37:43 crc kubenswrapper[4959]: I0128 15:37:43.432569 4959 generic.go:334] "Generic (PLEG): container finished" podID="2ac41b31-1ffd-4f8c-b693-7505c6894794" containerID="92761edae36e3513368ca65a4a9724a0761ec368d6f1f791bf5c558f35decd0b" exitCode=0
Jan 28 15:37:43 crc kubenswrapper[4959]: I0128 15:37:43.432666 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-l6hzt" event={"ID":"2ac41b31-1ffd-4f8c-b693-7505c6894794","Type":"ContainerDied","Data":"92761edae36e3513368ca65a4a9724a0761ec368d6f1f791bf5c558f35decd0b"}
Jan 28 15:37:44 crc kubenswrapper[4959]: I0128 15:37:44.999325 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-l6hzt"
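The machine-config-daemon restart above is the standard HTTP liveness flow: the kubelet GETs http://127.0.0.1:8798/health, the connection is refused, the probe is reported unhealthy, and the container is killed (here with the pod's 600-second grace period) and started again. A stand-in for the check itself, as a minimal sketch (the URL is the one printed in the probe output; the real probe's timing and failure thresholds come from the pod spec, which this log does not show):

from urllib.request import urlopen
from urllib.error import URLError

def probe(url="http://127.0.0.1:8798/health", timeout=1.0):
    try:
        with urlopen(url, timeout=timeout) as resp:
            return 200 <= resp.getcode() < 400  # kubelet counts 2xx/3xx as success
    except (URLError, OSError):
        return False  # e.g. "connect: connection refused", as logged above

print("healthy" if probe() else "unhealthy -> kubelet will restart the container")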
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.195319 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-config-data\") pod \"2ac41b31-1ffd-4f8c-b693-7505c6894794\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") "
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.195409 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-db-sync-config-data\") pod \"2ac41b31-1ffd-4f8c-b693-7505c6894794\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") "
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.195545 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77pzh\" (UniqueName: \"kubernetes.io/projected/2ac41b31-1ffd-4f8c-b693-7505c6894794-kube-api-access-77pzh\") pod \"2ac41b31-1ffd-4f8c-b693-7505c6894794\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") "
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.195593 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-combined-ca-bundle\") pod \"2ac41b31-1ffd-4f8c-b693-7505c6894794\" (UID: \"2ac41b31-1ffd-4f8c-b693-7505c6894794\") "
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.203323 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2ac41b31-1ffd-4f8c-b693-7505c6894794" (UID: "2ac41b31-1ffd-4f8c-b693-7505c6894794"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.203626 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ac41b31-1ffd-4f8c-b693-7505c6894794-kube-api-access-77pzh" (OuterVolumeSpecName: "kube-api-access-77pzh") pod "2ac41b31-1ffd-4f8c-b693-7505c6894794" (UID: "2ac41b31-1ffd-4f8c-b693-7505c6894794"). InnerVolumeSpecName "kube-api-access-77pzh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.224642 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ac41b31-1ffd-4f8c-b693-7505c6894794" (UID: "2ac41b31-1ffd-4f8c-b693-7505c6894794"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.247687 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-config-data" (OuterVolumeSpecName: "config-data") pod "2ac41b31-1ffd-4f8c-b693-7505c6894794" (UID: "2ac41b31-1ffd-4f8c-b693-7505c6894794"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.298250 4959 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.298314 4959 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.298331 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77pzh\" (UniqueName: \"kubernetes.io/projected/2ac41b31-1ffd-4f8c-b693-7505c6894794-kube-api-access-77pzh\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.298346 4959 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ac41b31-1ffd-4f8c-b693-7505c6894794-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.455698 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-l6hzt" event={"ID":"2ac41b31-1ffd-4f8c-b693-7505c6894794","Type":"ContainerDied","Data":"45a5703c55315464541e56cdd5132e4c691e7c890a83fc937cbb72ab32810d31"}
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.455749 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45a5703c55315464541e56cdd5132e4c691e7c890a83fc937cbb72ab32810d31"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.455816 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-l6hzt"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.919435 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-zcgr9"]
Jan 28 15:37:45 crc kubenswrapper[4959]: E0128 15:37:45.919878 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1ba29f6-1e73-4862-ab3f-8590465ff3d6" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.919897 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1ba29f6-1e73-4862-ab3f-8590465ff3d6" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: E0128 15:37:45.919921 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d68b30f6-ed68-4a06-987d-38bb6853c206" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.919929 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="d68b30f6-ed68-4a06-987d-38bb6853c206" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: E0128 15:37:45.919942 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aff13d88-7544-4442-ab55-4c426ccca467" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.919949 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="aff13d88-7544-4442-ab55-4c426ccca467" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: E0128 15:37:45.919965 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e92578e-1d79-49bd-9144-76a8450afc9a" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.919972 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e92578e-1d79-49bd-9144-76a8450afc9a" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: E0128 15:37:45.919980 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f6a647b-8434-4a66-b8b3-ab8e3e7a006f" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.919986 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f6a647b-8434-4a66-b8b3-ab8e3e7a006f" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: E0128 15:37:45.919997 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c55bb4f-197b-43bf-9f6b-917aa48aa16a" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920002 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c55bb4f-197b-43bf-9f6b-917aa48aa16a" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: E0128 15:37:45.920012 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac41b31-1ffd-4f8c-b693-7505c6894794" containerName="glance-db-sync"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920017 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac41b31-1ffd-4f8c-b693-7505c6894794" containerName="glance-db-sync"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920185 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1ba29f6-1e73-4862-ab3f-8590465ff3d6" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920196 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="aff13d88-7544-4442-ab55-4c426ccca467" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920204 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="d68b30f6-ed68-4a06-987d-38bb6853c206" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920221 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e92578e-1d79-49bd-9144-76a8450afc9a" containerName="mariadb-account-create-update"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920232 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f6a647b-8434-4a66-b8b3-ab8e3e7a006f" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920241 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c55bb4f-197b-43bf-9f6b-917aa48aa16a" containerName="mariadb-database-create"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.920248 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ac41b31-1ffd-4f8c-b693-7505c6894794" containerName="glance-db-sync"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.921209 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:45 crc kubenswrapper[4959]: I0128 15:37:45.962812 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-zcgr9"]
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.027059 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.028578 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.028732 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvzcs\" (UniqueName: \"kubernetes.io/projected/c30a71cc-6b61-4950-b2a4-895699bc5d8f-kube-api-access-hvzcs\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.036697 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.037035 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-config\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.037153 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.140009 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.140983 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.141168 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvzcs\" (UniqueName: \"kubernetes.io/projected/c30a71cc-6b61-4950-b2a4-895699bc5d8f-kube-api-access-hvzcs\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.141348 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.141540 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-config\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.141676 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.141790 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.142489 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.143229 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9"
"MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.143651 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-config\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.365327 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvzcs\" (UniqueName: \"kubernetes.io/projected/c30a71cc-6b61-4950-b2a4-895699bc5d8f-kube-api-access-hvzcs\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.370362 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c30a71cc-6b61-4950-b2a4-895699bc5d8f-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-zcgr9\" (UID: \"c30a71cc-6b61-4950-b2a4-895699bc5d8f\") " pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.574586 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" Jan 28 15:37:46 crc kubenswrapper[4959]: I0128 15:37:46.912723 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-zcgr9"] Jan 28 15:37:46 crc kubenswrapper[4959]: W0128 15:37:46.922006 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc30a71cc_6b61_4950_b2a4_895699bc5d8f.slice/crio-8809ec016ff11bf477e482d92c363f75cf33457fe06df5d2cbf642533e00ad57 WatchSource:0}: Error finding container 8809ec016ff11bf477e482d92c363f75cf33457fe06df5d2cbf642533e00ad57: Status 404 returned error can't find the container with id 8809ec016ff11bf477e482d92c363f75cf33457fe06df5d2cbf642533e00ad57 Jan 28 15:37:47 crc kubenswrapper[4959]: I0128 15:37:47.505625 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" event={"ID":"c30a71cc-6b61-4950-b2a4-895699bc5d8f","Type":"ContainerStarted","Data":"4153b0f2499a695242d97e42501e1eb78b8bf78b684adb459dae53c213f57771"} Jan 28 15:37:47 crc kubenswrapper[4959]: I0128 15:37:47.506023 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" event={"ID":"c30a71cc-6b61-4950-b2a4-895699bc5d8f","Type":"ContainerStarted","Data":"8809ec016ff11bf477e482d92c363f75cf33457fe06df5d2cbf642533e00ad57"} Jan 28 15:37:48 crc kubenswrapper[4959]: I0128 15:37:48.514547 4959 generic.go:334] "Generic (PLEG): container finished" podID="c30a71cc-6b61-4950-b2a4-895699bc5d8f" containerID="4153b0f2499a695242d97e42501e1eb78b8bf78b684adb459dae53c213f57771" exitCode=0 Jan 28 15:37:48 crc kubenswrapper[4959]: I0128 15:37:48.514659 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" event={"ID":"c30a71cc-6b61-4950-b2a4-895699bc5d8f","Type":"ContainerDied","Data":"4153b0f2499a695242d97e42501e1eb78b8bf78b684adb459dae53c213f57771"} Jan 28 15:37:49 crc kubenswrapper[4959]: I0128 
15:37:49.526258 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" event={"ID":"c30a71cc-6b61-4950-b2a4-895699bc5d8f","Type":"ContainerStarted","Data":"3bd790846c4d4b8ea64afff279f323953915862eb268bd01f100e16cda1e1958"} Jan 28 15:37:49 crc kubenswrapper[4959]: I0128 15:37:49.526821 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" Jan 28 15:37:49 crc kubenswrapper[4959]: I0128 15:37:49.558540 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" podStartSLOduration=4.558515532 podStartE2EDuration="4.558515532s" podCreationTimestamp="2026-01-28 15:37:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 15:37:49.55274658 +0000 UTC m=+1252.998652973" watchObservedRunningTime="2026-01-28 15:37:49.558515532 +0000 UTC m=+1253.004421915" Jan 28 15:37:56 crc kubenswrapper[4959]: I0128 15:37:56.577299 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7ff5475cc9-zcgr9" Jan 28 15:37:56 crc kubenswrapper[4959]: I0128 15:37:56.690434 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-swdsq"] Jan 28 15:37:56 crc kubenswrapper[4959]: I0128 15:37:56.690759 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" podUID="29192ab3-7f28-49be-a60e-b13906e49b10" containerName="dnsmasq-dns" containerID="cri-o://3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49" gracePeriod=10 Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.350206 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.481023 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-sb\") pod \"29192ab3-7f28-49be-a60e-b13906e49b10\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.481337 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwhlx\" (UniqueName: \"kubernetes.io/projected/29192ab3-7f28-49be-a60e-b13906e49b10-kube-api-access-cwhlx\") pod \"29192ab3-7f28-49be-a60e-b13906e49b10\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.481372 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-config\") pod \"29192ab3-7f28-49be-a60e-b13906e49b10\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.481440 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-swift-storage-0\") pod \"29192ab3-7f28-49be-a60e-b13906e49b10\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.481464 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-svc\") pod \"29192ab3-7f28-49be-a60e-b13906e49b10\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.481837 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-nb\") pod \"29192ab3-7f28-49be-a60e-b13906e49b10\" (UID: \"29192ab3-7f28-49be-a60e-b13906e49b10\") " Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.489474 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29192ab3-7f28-49be-a60e-b13906e49b10-kube-api-access-cwhlx" (OuterVolumeSpecName: "kube-api-access-cwhlx") pod "29192ab3-7f28-49be-a60e-b13906e49b10" (UID: "29192ab3-7f28-49be-a60e-b13906e49b10"). InnerVolumeSpecName "kube-api-access-cwhlx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.529003 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-config" (OuterVolumeSpecName: "config") pod "29192ab3-7f28-49be-a60e-b13906e49b10" (UID: "29192ab3-7f28-49be-a60e-b13906e49b10"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.535386 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "29192ab3-7f28-49be-a60e-b13906e49b10" (UID: "29192ab3-7f28-49be-a60e-b13906e49b10"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.537994 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "29192ab3-7f28-49be-a60e-b13906e49b10" (UID: "29192ab3-7f28-49be-a60e-b13906e49b10"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.538010 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "29192ab3-7f28-49be-a60e-b13906e49b10" (UID: "29192ab3-7f28-49be-a60e-b13906e49b10"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.547862 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "29192ab3-7f28-49be-a60e-b13906e49b10" (UID: "29192ab3-7f28-49be-a60e-b13906e49b10"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.585292 4959 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.585342 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwhlx\" (UniqueName: \"kubernetes.io/projected/29192ab3-7f28-49be-a60e-b13906e49b10-kube-api-access-cwhlx\") on node \"crc\" DevicePath \"\"" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.585356 4959 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-config\") on node \"crc\" DevicePath \"\"" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.585369 4959 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.585381 4959 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.585393 4959 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29192ab3-7f28-49be-a60e-b13906e49b10-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.632236 4959 generic.go:334] "Generic (PLEG): container finished" podID="29192ab3-7f28-49be-a60e-b13906e49b10" containerID="3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49" exitCode=0 Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.632302 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" event={"ID":"29192ab3-7f28-49be-a60e-b13906e49b10","Type":"ContainerDied","Data":"3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49"} Jan 28 15:37:57 crc 
kubenswrapper[4959]: I0128 15:37:57.632327 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.632360 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-swdsq" event={"ID":"29192ab3-7f28-49be-a60e-b13906e49b10","Type":"ContainerDied","Data":"9eae874023f55d92e78794b5ad2e8f0cabcb6c1a7445eaed0a644bf87a1d827a"} Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.632391 4959 scope.go:117] "RemoveContainer" containerID="3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.653271 4959 scope.go:117] "RemoveContainer" containerID="3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.666997 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-swdsq"] Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.674068 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-swdsq"] Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.689638 4959 scope.go:117] "RemoveContainer" containerID="3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49" Jan 28 15:37:57 crc kubenswrapper[4959]: E0128 15:37:57.690456 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49\": container with ID starting with 3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49 not found: ID does not exist" containerID="3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.690519 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49"} err="failed to get container status \"3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49\": rpc error: code = NotFound desc = could not find container \"3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49\": container with ID starting with 3427a8e4f1a66c5fc5864f1983e24e9d8adb8be05be7db01325742439c3ded49 not found: ID does not exist" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.690561 4959 scope.go:117] "RemoveContainer" containerID="3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a" Jan 28 15:37:57 crc kubenswrapper[4959]: E0128 15:37:57.691113 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a\": container with ID starting with 3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a not found: ID does not exist" containerID="3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a" Jan 28 15:37:57 crc kubenswrapper[4959]: I0128 15:37:57.691160 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a"} err="failed to get container status \"3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a\": rpc error: code = NotFound desc = could not find container \"3b45a3fa4fdb7f778fe8c7e566db72035e66ec1ea2f9d4a4341bcc76e1b1862a\": container with ID starting with 
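
The E-level "ContainerStatus from runtime service failed ... code = NotFound" entries above are a benign race, not a fault: by the time RemoveContainer re-queries the runtime, CRI-O has already deleted the container, so the status lookup can only return NotFound. A tool consuming CRI errors over gRPC could separate that case from real failures roughly as follows; this is a sketch using the standard grpc status package, not kubelet's own handling.

    package main

    import (
    	"errors"
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // isContainerGone reports whether a CRI error just means the container
    // no longer exists, as in the "code = NotFound" entries above.
    func isContainerGone(err error) bool {
    	return status.Code(err) == codes.NotFound
    }

    func main() {
    	err := status.Error(codes.NotFound, `could not find container "3427a8e4..."`)
    	fmt.Println(isContainerGone(err))                   // true
    	fmt.Println(isContainerGone(errors.New("timeout"))) // false
    }
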
Jan 28 15:37:58 crc kubenswrapper[4959]: I0128 15:37:58.599315 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29192ab3-7f28-49be-a60e-b13906e49b10" path="/var/lib/kubelet/pods/29192ab3-7f28-49be-a60e-b13906e49b10/volumes"
Jan 28 15:39:15 crc kubenswrapper[4959]: I0128 15:39:15.871598 4959 scope.go:117] "RemoveContainer" containerID="318da4d729bbd57caae2d80870f34085ab6be46354c638a3720fab7255852552"
Jan 28 15:39:15 crc kubenswrapper[4959]: I0128 15:39:15.909021 4959 scope.go:117] "RemoveContainer" containerID="9b8202f595d4971fdb047a2152e021c4de4e75f0d9eed1471f761b7706990d66"
Jan 28 15:39:15 crc kubenswrapper[4959]: I0128 15:39:15.933066 4959 scope.go:117] "RemoveContainer" containerID="f9a44e6abfd1e6c2a14f7bc9844fccd5e902cd530c7f009cb8eacb6954b1a600"
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.817864 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xvhwc"]
Jan 28 15:39:49 crc kubenswrapper[4959]: E0128 15:39:49.819126 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29192ab3-7f28-49be-a60e-b13906e49b10" containerName="dnsmasq-dns"
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.819142 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="29192ab3-7f28-49be-a60e-b13906e49b10" containerName="dnsmasq-dns"
Jan 28 15:39:49 crc kubenswrapper[4959]: E0128 15:39:49.819182 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29192ab3-7f28-49be-a60e-b13906e49b10" containerName="init"
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.819188 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="29192ab3-7f28-49be-a60e-b13906e49b10" containerName="init"
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.819379 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="29192ab3-7f28-49be-a60e-b13906e49b10" containerName="dnsmasq-dns"
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.820641 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.850893 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xvhwc"]
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.953682 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4stz\" (UniqueName: \"kubernetes.io/projected/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-kube-api-access-l4stz\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.953749 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-catalog-content\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:49 crc kubenswrapper[4959]: I0128 15:39:49.953974 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-utilities\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:50 crc kubenswrapper[4959]: I0128 15:39:50.055449 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4stz\" (UniqueName: \"kubernetes.io/projected/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-kube-api-access-l4stz\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:50 crc kubenswrapper[4959]: I0128 15:39:50.055512 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-catalog-content\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:50 crc kubenswrapper[4959]: I0128 15:39:50.055551 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-utilities\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:50 crc kubenswrapper[4959]: I0128 15:39:50.056262 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-catalog-content\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:50 crc kubenswrapper[4959]: I0128 15:39:50.056342 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-utilities\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:50 crc kubenswrapper[4959]: I0128 15:39:50.081964 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4stz\" (UniqueName: \"kubernetes.io/projected/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-kube-api-access-l4stz\") pod \"redhat-operators-xvhwc\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:50 crc kubenswrapper[4959]: I0128 15:39:50.145228 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xvhwc"
Jan 28 15:39:52 crc kubenswrapper[4959]: I0128 15:39:52.458698 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xvhwc"]
Jan 28 15:39:52 crc kubenswrapper[4959]: I0128 15:39:52.702836 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvhwc" event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerStarted","Data":"6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300"}
Jan 28 15:39:52 crc kubenswrapper[4959]: I0128 15:39:52.703344 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvhwc" event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerStarted","Data":"3707897b0953c01c99fca1635ea5d07f2d2046063d62a0d848300e3a81a4ffc1"}
Jan 28 15:39:53 crc kubenswrapper[4959]: I0128 15:39:53.715738 4959 generic.go:334] "Generic (PLEG): container finished" podID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerID="6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300" exitCode=0
Jan 28 15:39:53 crc kubenswrapper[4959]: I0128 15:39:53.715814 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvhwc" event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerDied","Data":"6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300"}
Jan 28 15:39:53 crc kubenswrapper[4959]: I0128 15:39:53.718740 4959 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 15:39:56 crc kubenswrapper[4959]: I0128 15:39:56.747316 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvhwc" event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerStarted","Data":"462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314"}
Jan 28 15:39:58 crc kubenswrapper[4959]: I0128 15:39:58.689578 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 15:39:58 crc kubenswrapper[4959]: I0128 15:39:58.690140 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 15:40:00 crc kubenswrapper[4959]: I0128 15:40:00.782620 4959 generic.go:334] "Generic (PLEG): container finished" podID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerID="462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314" exitCode=0
Jan 28 15:40:00 crc kubenswrapper[4959]: I0128 15:40:00.782695 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvhwc" event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerDied","Data":"462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314"}
event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerDied","Data":"462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314"} Jan 28 15:40:04 crc kubenswrapper[4959]: I0128 15:40:04.820580 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvhwc" event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerStarted","Data":"a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e"} Jan 28 15:40:04 crc kubenswrapper[4959]: I0128 15:40:04.848076 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xvhwc" podStartSLOduration=5.582571693 podStartE2EDuration="15.848052883s" podCreationTimestamp="2026-01-28 15:39:49 +0000 UTC" firstStartedPulling="2026-01-28 15:39:53.718405821 +0000 UTC m=+1377.164312204" lastFinishedPulling="2026-01-28 15:40:03.983887001 +0000 UTC m=+1387.429793394" observedRunningTime="2026-01-28 15:40:04.838740314 +0000 UTC m=+1388.284646697" watchObservedRunningTime="2026-01-28 15:40:04.848052883 +0000 UTC m=+1388.293959266" Jan 28 15:40:10 crc kubenswrapper[4959]: I0128 15:40:10.146031 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xvhwc" Jan 28 15:40:10 crc kubenswrapper[4959]: I0128 15:40:10.147482 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xvhwc" Jan 28 15:40:10 crc kubenswrapper[4959]: I0128 15:40:10.198237 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xvhwc" Jan 28 15:40:10 crc kubenswrapper[4959]: I0128 15:40:10.919070 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xvhwc" Jan 28 15:40:11 crc kubenswrapper[4959]: I0128 15:40:11.054710 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xvhwc"] Jan 28 15:40:12 crc kubenswrapper[4959]: I0128 15:40:12.891954 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xvhwc" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerName="registry-server" containerID="cri-o://a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e" gracePeriod=2 Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.340784 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xvhwc" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.436942 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-catalog-content\") pod \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.437102 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4stz\" (UniqueName: \"kubernetes.io/projected/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-kube-api-access-l4stz\") pod \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.437254 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-utilities\") pod \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\" (UID: \"dd57176e-67cc-4f0f-adfc-c1d37e1e3229\") " Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.439907 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-utilities" (OuterVolumeSpecName: "utilities") pod "dd57176e-67cc-4f0f-adfc-c1d37e1e3229" (UID: "dd57176e-67cc-4f0f-adfc-c1d37e1e3229"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.448122 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-kube-api-access-l4stz" (OuterVolumeSpecName: "kube-api-access-l4stz") pod "dd57176e-67cc-4f0f-adfc-c1d37e1e3229" (UID: "dd57176e-67cc-4f0f-adfc-c1d37e1e3229"). InnerVolumeSpecName "kube-api-access-l4stz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.540410 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4stz\" (UniqueName: \"kubernetes.io/projected/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-kube-api-access-l4stz\") on node \"crc\" DevicePath \"\"" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.540473 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.573530 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dd57176e-67cc-4f0f-adfc-c1d37e1e3229" (UID: "dd57176e-67cc-4f0f-adfc-c1d37e1e3229"). InnerVolumeSpecName "catalog-content". 
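
The "Observed pod startup duration" entry above for redhat-operators-xvhwc makes the tracker's arithmetic visible: podStartE2EDuration (15.848052883s) is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration (5.582571693) is the E2E figure minus the image-pull window (lastFinishedPulling − firstStartedPulling ≈ 10.265481s). A sketch reproducing the numbers; the tracker's exact bookkeeping lives in pod_startup_latency_tracker.go, and a small sub-microsecond rounding difference against the logged SLO value is expected.

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Timestamps copied from the log entry above.
    	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    	created, _ := time.Parse(layout, "2026-01-28 15:39:49 +0000 UTC")
    	firstPull, _ := time.Parse(layout, "2026-01-28 15:39:53.718405821 +0000 UTC")
    	lastPull, _ := time.Parse(layout, "2026-01-28 15:40:03.983887001 +0000 UTC")
    	running, _ := time.Parse(layout, "2026-01-28 15:40:04.848052883 +0000 UTC")

    	e2e := running.Sub(created)          // ≈ 15.848052883s (podStartE2EDuration)
    	slo := e2e - lastPull.Sub(firstPull) // ≈ 5.5825717s (podStartSLOduration)
    	fmt.Println(e2e, slo)
    }
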
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.642593 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd57176e-67cc-4f0f-adfc-c1d37e1e3229-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.908811 4959 generic.go:334] "Generic (PLEG): container finished" podID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerID="a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e" exitCode=0 Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.908907 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvhwc" event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerDied","Data":"a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e"} Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.908974 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xvhwc" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.909006 4959 scope.go:117] "RemoveContainer" containerID="a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.908987 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xvhwc" event={"ID":"dd57176e-67cc-4f0f-adfc-c1d37e1e3229","Type":"ContainerDied","Data":"3707897b0953c01c99fca1635ea5d07f2d2046063d62a0d848300e3a81a4ffc1"} Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.952347 4959 scope.go:117] "RemoveContainer" containerID="462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314" Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.954068 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xvhwc"] Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.966278 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xvhwc"] Jan 28 15:40:13 crc kubenswrapper[4959]: I0128 15:40:13.984544 4959 scope.go:117] "RemoveContainer" containerID="6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300" Jan 28 15:40:14 crc kubenswrapper[4959]: I0128 15:40:14.021340 4959 scope.go:117] "RemoveContainer" containerID="a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e" Jan 28 15:40:14 crc kubenswrapper[4959]: E0128 15:40:14.021947 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e\": container with ID starting with a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e not found: ID does not exist" containerID="a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e" Jan 28 15:40:14 crc kubenswrapper[4959]: I0128 15:40:14.022064 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e"} err="failed to get container status \"a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e\": rpc error: code = NotFound desc = could not find container \"a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e\": container with ID starting with a2acbc8bb82cf433672179904e2d879dd1a0f1ade2703c2404110a1a9144065e not found: ID does not exist" Jan 28 15:40:14 crc 
kubenswrapper[4959]: I0128 15:40:14.022197 4959 scope.go:117] "RemoveContainer" containerID="462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314" Jan 28 15:40:14 crc kubenswrapper[4959]: E0128 15:40:14.022856 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314\": container with ID starting with 462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314 not found: ID does not exist" containerID="462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314" Jan 28 15:40:14 crc kubenswrapper[4959]: I0128 15:40:14.022908 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314"} err="failed to get container status \"462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314\": rpc error: code = NotFound desc = could not find container \"462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314\": container with ID starting with 462ea55937c301255777627b470bee9bcf546dc4dadbdc8b24ff53795ca01314 not found: ID does not exist" Jan 28 15:40:14 crc kubenswrapper[4959]: I0128 15:40:14.022946 4959 scope.go:117] "RemoveContainer" containerID="6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300" Jan 28 15:40:14 crc kubenswrapper[4959]: E0128 15:40:14.023338 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300\": container with ID starting with 6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300 not found: ID does not exist" containerID="6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300" Jan 28 15:40:14 crc kubenswrapper[4959]: I0128 15:40:14.023367 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300"} err="failed to get container status \"6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300\": rpc error: code = NotFound desc = could not find container \"6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300\": container with ID starting with 6799cbe18c3402c3c318dd338b871a929b61946270671950938cc152d4d93300 not found: ID does not exist" Jan 28 15:40:14 crc kubenswrapper[4959]: I0128 15:40:14.598321 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" path="/var/lib/kubelet/pods/dd57176e-67cc-4f0f-adfc-c1d37e1e3229/volumes" Jan 28 15:40:28 crc kubenswrapper[4959]: I0128 15:40:28.689014 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:40:28 crc kubenswrapper[4959]: I0128 15:40:28.689795 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:40:58 crc kubenswrapper[4959]: I0128 15:40:58.689201 4959 patch_prober.go:28] interesting 
pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:40:58 crc kubenswrapper[4959]: I0128 15:40:58.689936 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:40:58 crc kubenswrapper[4959]: I0128 15:40:58.690007 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:40:58 crc kubenswrapper[4959]: I0128 15:40:58.690945 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f37a96621a701b52e2c4c9fe1e3769cc979d96d65301975c4af562d003635b74"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:40:58 crc kubenswrapper[4959]: I0128 15:40:58.691008 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://f37a96621a701b52e2c4c9fe1e3769cc979d96d65301975c4af562d003635b74" gracePeriod=600 Jan 28 15:40:59 crc kubenswrapper[4959]: I0128 15:40:59.332464 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="f37a96621a701b52e2c4c9fe1e3769cc979d96d65301975c4af562d003635b74" exitCode=0 Jan 28 15:40:59 crc kubenswrapper[4959]: I0128 15:40:59.332542 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"f37a96621a701b52e2c4c9fe1e3769cc979d96d65301975c4af562d003635b74"} Jan 28 15:40:59 crc kubenswrapper[4959]: I0128 15:40:59.333303 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228"} Jan 28 15:40:59 crc kubenswrapper[4959]: I0128 15:40:59.333329 4959 scope.go:117] "RemoveContainer" containerID="b44c8493f2114ae61783ae8e4da030e783bddb8b9595a72c10f90d99f7517fe3" Jan 28 15:42:58 crc kubenswrapper[4959]: I0128 15:42:58.689820 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:42:58 crc kubenswrapper[4959]: I0128 15:42:58.690807 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:43:15 
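
The machine-config-daemon sequence above shows the full liveness-restart path: failures logged at 15:39:58, 15:40:28 and 15:40:58, exactly 30 seconds apart, with the restart fired on the third one ("failed liveness probe, will be restarted") and the 600-second grace period coming from the pod's termination settings. That cadence is consistent with a probe roughly like the following sketch, built with current k8s.io/api types (the field was called Handler before the ProbeHandler rename); the daemon's real manifest may differ, this is inferred from the 30-second spacing alone.

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
    	// An HTTP GET against 127.0.0.1:8798/health every 30s, restarting
    	// the container after 3 consecutive failures — the pattern the
    	// entries above exhibit. Values here are assumptions, not the
    	// machine-config-daemon's actual spec.
    	probe := corev1.Probe{
    		ProbeHandler: corev1.ProbeHandler{
    			HTTPGet: &corev1.HTTPGetAction{
    				Path: "/health",
    				Port: intstr.FromInt(8798),
    			},
    		},
    		PeriodSeconds:    30,
    		FailureThreshold: 3,
    	}
    	fmt.Printf("%+v\n", probe)
    }
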
Jan 28 15:43:15 crc kubenswrapper[4959]: E0128 15:43:15.549224 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerName="extract-content"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.549243 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerName="extract-content"
Jan 28 15:43:15 crc kubenswrapper[4959]: E0128 15:43:15.549275 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerName="registry-server"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.549286 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerName="registry-server"
Jan 28 15:43:15 crc kubenswrapper[4959]: E0128 15:43:15.549308 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerName="extract-utilities"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.549322 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerName="extract-utilities"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.549753 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd57176e-67cc-4f0f-adfc-c1d37e1e3229" containerName="registry-server"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.552696 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.561839 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s59t7"]
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.718698 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-catalog-content\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.719343 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrxxh\" (UniqueName: \"kubernetes.io/projected/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-kube-api-access-xrxxh\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.719392 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-utilities\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.821144 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-catalog-content\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.821234 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrxxh\" (UniqueName: \"kubernetes.io/projected/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-kube-api-access-xrxxh\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.821285 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-utilities\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.822029 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-catalog-content\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.822054 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-utilities\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.846217 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrxxh\" (UniqueName: \"kubernetes.io/projected/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-kube-api-access-xrxxh\") pod \"certified-operators-s59t7\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:15 crc kubenswrapper[4959]: I0128 15:43:15.899940 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:16 crc kubenswrapper[4959]: I0128 15:43:16.074308 4959 scope.go:117] "RemoveContainer" containerID="8873f71d1c1deeee4f7bfa8f936db4c0767888a7c270c2755d109434387fdd2c"
Jan 28 15:43:16 crc kubenswrapper[4959]: I0128 15:43:16.164626 4959 scope.go:117] "RemoveContainer" containerID="3f932d58d2ecae43f58dd3916c6ce6aa0e8febf7474ecf1f964f43b00ec4dcdc"
Jan 28 15:43:16 crc kubenswrapper[4959]: I0128 15:43:16.565161 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s59t7"]
Jan 28 15:43:16 crc kubenswrapper[4959]: I0128 15:43:16.623236 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s59t7" event={"ID":"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b","Type":"ContainerStarted","Data":"7a3da33531ba362fd093073615d01745d237ee524773842f03ae33338c8efe37"}
Jan 28 15:43:17 crc kubenswrapper[4959]: I0128 15:43:17.635245 4959 generic.go:334] "Generic (PLEG): container finished" podID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerID="f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f" exitCode=0
Jan 28 15:43:17 crc kubenswrapper[4959]: I0128 15:43:17.635303 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s59t7" event={"ID":"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b","Type":"ContainerDied","Data":"f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f"}
Jan 28 15:43:19 crc kubenswrapper[4959]: I0128 15:43:19.656330 4959 generic.go:334] "Generic (PLEG): container finished" podID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerID="66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4" exitCode=0
Jan 28 15:43:19 crc kubenswrapper[4959]: I0128 15:43:19.656414 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s59t7" event={"ID":"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b","Type":"ContainerDied","Data":"66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4"}
Jan 28 15:43:21 crc kubenswrapper[4959]: I0128 15:43:21.676269 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s59t7" event={"ID":"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b","Type":"ContainerStarted","Data":"b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a"}
Jan 28 15:43:21 crc kubenswrapper[4959]: I0128 15:43:21.705388 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s59t7" podStartSLOduration=3.15521829 podStartE2EDuration="6.705358654s" podCreationTimestamp="2026-01-28 15:43:15 +0000 UTC" firstStartedPulling="2026-01-28 15:43:17.637611549 +0000 UTC m=+1581.083517942" lastFinishedPulling="2026-01-28 15:43:21.187751903 +0000 UTC m=+1584.633658306" observedRunningTime="2026-01-28 15:43:21.695818419 +0000 UTC m=+1585.141724832" watchObservedRunningTime="2026-01-28 15:43:21.705358654 +0000 UTC m=+1585.151265037"
Jan 28 15:43:25 crc kubenswrapper[4959]: I0128 15:43:25.900862 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:25 crc kubenswrapper[4959]: I0128 15:43:25.902379 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s59t7"
Jan 28 15:43:25 crc kubenswrapper[4959]: I0128 15:43:25.943333 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-s59t7"
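
The certified-operators-s59t7 pod above follows the usual catalog pattern: two init-style steps (extract-utilities, extract-content) each start and exit 0 before the long-running registry-server comes up. Because every "SyncLoop (PLEG)" entry carries an event={...} payload that happens to be valid JSON, a per-pod container timeline can be reconstructed straight from a log like this one; a sketch, with field meanings inferred from the entries above.

    package main

    import (
    	"bufio"
    	"encoding/json"
    	"fmt"
    	"os"
    	"regexp"
    )

    // plegEvent captures the event={...} payload of the "SyncLoop (PLEG)"
    // lines above; the braces contain valid JSON, so we can decode them.
    var plegEvent = regexp.MustCompile(`"SyncLoop \(PLEG\): event for pod".*event=(\{.*?\})`)

    type event struct {
    	ID   string // pod UID
    	Type string // ContainerStarted, ContainerDied, ...
    	Data string // container or sandbox ID
    }

    func main() {
    	sc := bufio.NewScanner(os.Stdin) // feed the log on stdin
    	sc.Buffer(make([]byte, 1024*1024), 1024*1024)
    	for sc.Scan() {
    		m := plegEvent.FindStringSubmatch(sc.Text())
    		if m == nil {
    			continue
    		}
    		var ev event
    		if json.Unmarshal([]byte(m[1]), &ev) == nil {
    			fmt.Printf("%s %s %.12s\n", ev.ID, ev.Type, ev.Data)
    		}
    	}
    }
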
status="started" pod="openshift-marketplace/certified-operators-s59t7" Jan 28 15:43:26 crc kubenswrapper[4959]: I0128 15:43:26.791496 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s59t7" Jan 28 15:43:26 crc kubenswrapper[4959]: I0128 15:43:26.847670 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s59t7"] Jan 28 15:43:28 crc kubenswrapper[4959]: I0128 15:43:28.689209 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:43:28 crc kubenswrapper[4959]: I0128 15:43:28.689611 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:43:28 crc kubenswrapper[4959]: I0128 15:43:28.738234 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-s59t7" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerName="registry-server" containerID="cri-o://b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a" gracePeriod=2 Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.309079 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s59t7" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.486307 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-utilities\") pod \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.486526 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-catalog-content\") pod \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.486649 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrxxh\" (UniqueName: \"kubernetes.io/projected/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-kube-api-access-xrxxh\") pod \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\" (UID: \"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b\") " Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.487578 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-utilities" (OuterVolumeSpecName: "utilities") pod "57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" (UID: "57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.497687 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-kube-api-access-xrxxh" (OuterVolumeSpecName: "kube-api-access-xrxxh") pod "57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" (UID: "57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b"). InnerVolumeSpecName "kube-api-access-xrxxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.554295 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" (UID: "57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.588447 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.588480 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrxxh\" (UniqueName: \"kubernetes.io/projected/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-kube-api-access-xrxxh\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.588491 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.751240 4959 generic.go:334] "Generic (PLEG): container finished" podID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerID="b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a" exitCode=0 Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.751308 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s59t7" event={"ID":"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b","Type":"ContainerDied","Data":"b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a"} Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.751351 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s59t7" event={"ID":"57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b","Type":"ContainerDied","Data":"7a3da33531ba362fd093073615d01745d237ee524773842f03ae33338c8efe37"} Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.751377 4959 scope.go:117] "RemoveContainer" containerID="b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.751586 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s59t7" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.792868 4959 scope.go:117] "RemoveContainer" containerID="66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.796062 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s59t7"] Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.805326 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-s59t7"] Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.823019 4959 scope.go:117] "RemoveContainer" containerID="f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.859330 4959 scope.go:117] "RemoveContainer" containerID="b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a" Jan 28 15:43:29 crc kubenswrapper[4959]: E0128 15:43:29.860804 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a\": container with ID starting with b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a not found: ID does not exist" containerID="b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.860844 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a"} err="failed to get container status \"b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a\": rpc error: code = NotFound desc = could not find container \"b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a\": container with ID starting with b1b7d0baa8a76037e07daa598f3bd5b81680c6aa00e30f915959e20cd75a024a not found: ID does not exist" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.860872 4959 scope.go:117] "RemoveContainer" containerID="66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4" Jan 28 15:43:29 crc kubenswrapper[4959]: E0128 15:43:29.861334 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4\": container with ID starting with 66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4 not found: ID does not exist" containerID="66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.861359 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4"} err="failed to get container status \"66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4\": rpc error: code = NotFound desc = could not find container \"66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4\": container with ID starting with 66a727008a6aca4938f6114a27c9dcc1eda96e75c31ddca1bf29aa1fc2cd51e4 not found: ID does not exist" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.861388 4959 scope.go:117] "RemoveContainer" containerID="f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f" Jan 28 15:43:29 crc kubenswrapper[4959]: E0128 15:43:29.861784 4959 log.go:32] "ContainerStatus from runtime service 
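
After the REMOVE and the container-status churn above, the kubelet reports "Cleaned up orphaned pod volumes dir" once nothing is left under /var/lib/kubelet/pods/<uid>/volumes. A sketch that lists whatever volume paths still exist under that tree; note that on disk the plugin directories use tilde names such as kubernetes.io~empty-dir rather than the kubernetes.io/empty-dir form the log prints, and the path assumes the default kubelet root.

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // List per-pod volume directories still present under the kubelet root,
    // i.e. the layout /var/lib/kubelet/pods/<pod-uid>/volumes/<plugin>/<name>
    // that the cleanup entries in this log refer to.
    func main() {
    	paths, err := filepath.Glob("/var/lib/kubelet/pods/*/volumes/*/*")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		return
    	}
    	for _, p := range paths {
    		fmt.Println(p)
    	}
    }
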
failed" err="rpc error: code = NotFound desc = could not find container \"f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f\": container with ID starting with f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f not found: ID does not exist" containerID="f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f" Jan 28 15:43:29 crc kubenswrapper[4959]: I0128 15:43:29.861822 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f"} err="failed to get container status \"f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f\": rpc error: code = NotFound desc = could not find container \"f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f\": container with ID starting with f71a6e91c60fb7e593919ee39513aa27fde6f59b00eb55fd85317d5bd758549f not found: ID does not exist" Jan 28 15:43:30 crc kubenswrapper[4959]: I0128 15:43:30.600920 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" path="/var/lib/kubelet/pods/57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b/volumes" Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.310055 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s6t82"] Jan 28 15:43:33 crc kubenswrapper[4959]: E0128 15:43:33.311040 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerName="extract-utilities" Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.311061 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerName="extract-utilities" Jan 28 15:43:33 crc kubenswrapper[4959]: E0128 15:43:33.311093 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerName="extract-content" Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.311101 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerName="extract-content" Jan 28 15:43:33 crc kubenswrapper[4959]: E0128 15:43:33.311278 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerName="registry-server" Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.311288 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerName="registry-server" Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.311498 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="57ac2e82-dd90-47ca-b04b-7e80aa7f9a7b" containerName="registry-server" Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.313054 4959 util.go:30] "No sandbox for pod can be found. 
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.325647 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6t82"]
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.464896 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2v8t\" (UniqueName: \"kubernetes.io/projected/4f35b7f7-1898-41af-a80d-3306591ae849-kube-api-access-x2v8t\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.465017 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-catalog-content\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.465178 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-utilities\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.567343 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-utilities\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.567439 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2v8t\" (UniqueName: \"kubernetes.io/projected/4f35b7f7-1898-41af-a80d-3306591ae849-kube-api-access-x2v8t\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.567510 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-catalog-content\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.568047 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-utilities\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.568101 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-catalog-content\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
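
The three mounts above are the standard layout for an OLM catalog pod: two emptyDir scratch volumes plus the projected service-account token volume the kubelet names kube-api-access-<suffix>. A sketch of the emptyDir part of such a pod spec using k8s.io/api types (illustrative; the real manifest comes from the marketplace operator, not from this log):

    package catalognotes

    import corev1 "k8s.io/api/core/v1"

    // catalogVolumes mirrors the "utilities" and "catalog-content" volumes in
    // the mount records above; both are node-local emptyDirs, so their contents
    // are discarded when the pod is deleted (see the TearDown records later).
    var catalogVolumes = []corev1.Volume{
            {Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
            {Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
    }
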
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.590620 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2v8t\" (UniqueName: \"kubernetes.io/projected/4f35b7f7-1898-41af-a80d-3306591ae849-kube-api-access-x2v8t\") pod \"redhat-marketplace-s6t82\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") " pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:33 crc kubenswrapper[4959]: I0128 15:43:33.635340 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:34 crc kubenswrapper[4959]: I0128 15:43:34.167918 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6t82"]
Jan 28 15:43:34 crc kubenswrapper[4959]: I0128 15:43:34.804507 4959 generic.go:334] "Generic (PLEG): container finished" podID="4f35b7f7-1898-41af-a80d-3306591ae849" containerID="58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd" exitCode=0
Jan 28 15:43:34 crc kubenswrapper[4959]: I0128 15:43:34.804601 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6t82" event={"ID":"4f35b7f7-1898-41af-a80d-3306591ae849","Type":"ContainerDied","Data":"58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd"}
Jan 28 15:43:34 crc kubenswrapper[4959]: I0128 15:43:34.804986 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6t82" event={"ID":"4f35b7f7-1898-41af-a80d-3306591ae849","Type":"ContainerStarted","Data":"150ea264db5507fa924e90e4ef7f3d854e98b9f0fcfb89a296a72d4dd76b640e"}
Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.487886 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xmbsr"]
Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.492607 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.506422 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xmbsr"] Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.654336 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cn8lf\" (UniqueName: \"kubernetes.io/projected/031c373e-d7fb-42f5-865f-ed448fc83e7d-kube-api-access-cn8lf\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.654412 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-utilities\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.654503 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-catalog-content\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.755782 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-catalog-content\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.755906 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn8lf\" (UniqueName: \"kubernetes.io/projected/031c373e-d7fb-42f5-865f-ed448fc83e7d-kube-api-access-cn8lf\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.755925 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-utilities\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.756426 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-utilities\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.756426 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-catalog-content\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.781961 4959 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-cn8lf\" (UniqueName: \"kubernetes.io/projected/031c373e-d7fb-42f5-865f-ed448fc83e7d-kube-api-access-cn8lf\") pod \"community-operators-xmbsr\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.828329 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.828773 4959 generic.go:334] "Generic (PLEG): container finished" podID="4f35b7f7-1898-41af-a80d-3306591ae849" containerID="9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49" exitCode=0 Jan 28 15:43:36 crc kubenswrapper[4959]: I0128 15:43:36.828827 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6t82" event={"ID":"4f35b7f7-1898-41af-a80d-3306591ae849","Type":"ContainerDied","Data":"9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49"} Jan 28 15:43:37 crc kubenswrapper[4959]: I0128 15:43:37.183115 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xmbsr"] Jan 28 15:43:37 crc kubenswrapper[4959]: I0128 15:43:37.851376 4959 generic.go:334] "Generic (PLEG): container finished" podID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerID="5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134" exitCode=0 Jan 28 15:43:37 crc kubenswrapper[4959]: I0128 15:43:37.851469 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xmbsr" event={"ID":"031c373e-d7fb-42f5-865f-ed448fc83e7d","Type":"ContainerDied","Data":"5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134"} Jan 28 15:43:37 crc kubenswrapper[4959]: I0128 15:43:37.851760 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xmbsr" event={"ID":"031c373e-d7fb-42f5-865f-ed448fc83e7d","Type":"ContainerStarted","Data":"dad3f662cb3c2798d6e4294eeb1366985479458f201633c339359b0db5573413"} Jan 28 15:43:37 crc kubenswrapper[4959]: I0128 15:43:37.860569 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6t82" event={"ID":"4f35b7f7-1898-41af-a80d-3306591ae849","Type":"ContainerStarted","Data":"09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056"} Jan 28 15:43:37 crc kubenswrapper[4959]: I0128 15:43:37.893539 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s6t82" podStartSLOduration=2.27994769 podStartE2EDuration="4.893512782s" podCreationTimestamp="2026-01-28 15:43:33 +0000 UTC" firstStartedPulling="2026-01-28 15:43:34.80745034 +0000 UTC m=+1598.253356723" lastFinishedPulling="2026-01-28 15:43:37.421015432 +0000 UTC m=+1600.866921815" observedRunningTime="2026-01-28 15:43:37.891038491 +0000 UTC m=+1601.336944884" watchObservedRunningTime="2026-01-28 15:43:37.893512782 +0000 UTC m=+1601.339419165" Jan 28 15:43:39 crc kubenswrapper[4959]: I0128 15:43:39.884964 4959 generic.go:334] "Generic (PLEG): container finished" podID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerID="d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4" exitCode=0 Jan 28 15:43:39 crc kubenswrapper[4959]: I0128 15:43:39.885203 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xmbsr" 
event={"ID":"031c373e-d7fb-42f5-865f-ed448fc83e7d","Type":"ContainerDied","Data":"d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4"} Jan 28 15:43:40 crc kubenswrapper[4959]: I0128 15:43:40.899188 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xmbsr" event={"ID":"031c373e-d7fb-42f5-865f-ed448fc83e7d","Type":"ContainerStarted","Data":"3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276"} Jan 28 15:43:40 crc kubenswrapper[4959]: I0128 15:43:40.929594 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xmbsr" podStartSLOduration=2.479113587 podStartE2EDuration="4.929570581s" podCreationTimestamp="2026-01-28 15:43:36 +0000 UTC" firstStartedPulling="2026-01-28 15:43:37.855033964 +0000 UTC m=+1601.300940347" lastFinishedPulling="2026-01-28 15:43:40.305490958 +0000 UTC m=+1603.751397341" observedRunningTime="2026-01-28 15:43:40.925345927 +0000 UTC m=+1604.371252320" watchObservedRunningTime="2026-01-28 15:43:40.929570581 +0000 UTC m=+1604.375476964" Jan 28 15:43:43 crc kubenswrapper[4959]: I0128 15:43:43.636239 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s6t82" Jan 28 15:43:43 crc kubenswrapper[4959]: I0128 15:43:43.636884 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s6t82" Jan 28 15:43:43 crc kubenswrapper[4959]: I0128 15:43:43.696094 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s6t82" Jan 28 15:43:43 crc kubenswrapper[4959]: I0128 15:43:43.972322 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s6t82" Jan 28 15:43:44 crc kubenswrapper[4959]: I0128 15:43:44.880223 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6t82"] Jan 28 15:43:45 crc kubenswrapper[4959]: I0128 15:43:45.944980 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s6t82" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" containerName="registry-server" containerID="cri-o://09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056" gracePeriod=2 Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.468261 4959 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.468261 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s6t82"
Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.497898 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-catalog-content\") pod \"4f35b7f7-1898-41af-a80d-3306591ae849\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") "
Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.498050 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-utilities\") pod \"4f35b7f7-1898-41af-a80d-3306591ae849\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") "
Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.498251 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2v8t\" (UniqueName: \"kubernetes.io/projected/4f35b7f7-1898-41af-a80d-3306591ae849-kube-api-access-x2v8t\") pod \"4f35b7f7-1898-41af-a80d-3306591ae849\" (UID: \"4f35b7f7-1898-41af-a80d-3306591ae849\") "
Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.499094 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-utilities" (OuterVolumeSpecName: "utilities") pod "4f35b7f7-1898-41af-a80d-3306591ae849" (UID: "4f35b7f7-1898-41af-a80d-3306591ae849"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.511079 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f35b7f7-1898-41af-a80d-3306591ae849-kube-api-access-x2v8t" (OuterVolumeSpecName: "kube-api-access-x2v8t") pod "4f35b7f7-1898-41af-a80d-3306591ae849" (UID: "4f35b7f7-1898-41af-a80d-3306591ae849"). InnerVolumeSpecName "kube-api-access-x2v8t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.539563 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4f35b7f7-1898-41af-a80d-3306591ae849" (UID: "4f35b7f7-1898-41af-a80d-3306591ae849"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.601209 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2v8t\" (UniqueName: \"kubernetes.io/projected/4f35b7f7-1898-41af-a80d-3306591ae849-kube-api-access-x2v8t\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.601265 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.601278 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f35b7f7-1898-41af-a80d-3306591ae849-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.829387 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.829772 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.882201 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.956419 4959 generic.go:334] "Generic (PLEG): container finished" podID="4f35b7f7-1898-41af-a80d-3306591ae849" containerID="09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056" exitCode=0 Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.957529 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6t82" event={"ID":"4f35b7f7-1898-41af-a80d-3306591ae849","Type":"ContainerDied","Data":"09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056"} Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.957564 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6t82" event={"ID":"4f35b7f7-1898-41af-a80d-3306591ae849","Type":"ContainerDied","Data":"150ea264db5507fa924e90e4ef7f3d854e98b9f0fcfb89a296a72d4dd76b640e"} Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.957585 4959 scope.go:117] "RemoveContainer" containerID="09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.958143 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s6t82" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.987390 4959 scope.go:117] "RemoveContainer" containerID="9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49" Jan 28 15:43:46 crc kubenswrapper[4959]: I0128 15:43:46.996672 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6t82"] Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.007578 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6t82"] Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.007916 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.017841 4959 scope.go:117] "RemoveContainer" containerID="58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd" Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.057389 4959 scope.go:117] "RemoveContainer" containerID="09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056" Jan 28 15:43:47 crc kubenswrapper[4959]: E0128 15:43:47.058157 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056\": container with ID starting with 09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056 not found: ID does not exist" containerID="09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056" Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.058204 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056"} err="failed to get container status \"09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056\": rpc error: code = NotFound desc = could not find container \"09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056\": container with ID starting with 09b0b9975ff078a1c4ec0d27d817ac0b391ee9d312482423bc343dce3b16c056 not found: ID does not exist" Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.058241 4959 scope.go:117] "RemoveContainer" containerID="9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49" Jan 28 15:43:47 crc kubenswrapper[4959]: E0128 15:43:47.058857 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49\": container with ID starting with 9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49 not found: ID does not exist" containerID="9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49" Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.058932 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49"} err="failed to get container status \"9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49\": rpc error: code = NotFound desc = could not find container \"9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49\": container with ID starting with 9889a7dbb296a104b97b7fc9640f83d5fb93aa4f0259a8d27b672bfd8c739f49 not found: ID does not exist" Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.058998 4959 scope.go:117] "RemoveContainer" 
containerID="58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd" Jan 28 15:43:47 crc kubenswrapper[4959]: E0128 15:43:47.059748 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd\": container with ID starting with 58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd not found: ID does not exist" containerID="58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd" Jan 28 15:43:47 crc kubenswrapper[4959]: I0128 15:43:47.059821 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd"} err="failed to get container status \"58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd\": rpc error: code = NotFound desc = could not find container \"58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd\": container with ID starting with 58673af7270d0de3f8f05d0167e923bd770204bb329a0a749447ec6c55b82fdd not found: ID does not exist" Jan 28 15:43:48 crc kubenswrapper[4959]: I0128 15:43:48.709629 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" path="/var/lib/kubelet/pods/4f35b7f7-1898-41af-a80d-3306591ae849/volumes" Jan 28 15:43:49 crc kubenswrapper[4959]: I0128 15:43:49.879261 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xmbsr"] Jan 28 15:43:50 crc kubenswrapper[4959]: I0128 15:43:50.002674 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xmbsr" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerName="registry-server" containerID="cri-o://3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276" gracePeriod=2 Jan 28 15:43:50 crc kubenswrapper[4959]: I0128 15:43:50.782404 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:50 crc kubenswrapper[4959]: I0128 15:43:50.950909 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-catalog-content\") pod \"031c373e-d7fb-42f5-865f-ed448fc83e7d\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " Jan 28 15:43:50 crc kubenswrapper[4959]: I0128 15:43:50.951087 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cn8lf\" (UniqueName: \"kubernetes.io/projected/031c373e-d7fb-42f5-865f-ed448fc83e7d-kube-api-access-cn8lf\") pod \"031c373e-d7fb-42f5-865f-ed448fc83e7d\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " Jan 28 15:43:50 crc kubenswrapper[4959]: I0128 15:43:50.951155 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-utilities\") pod \"031c373e-d7fb-42f5-865f-ed448fc83e7d\" (UID: \"031c373e-d7fb-42f5-865f-ed448fc83e7d\") " Jan 28 15:43:50 crc kubenswrapper[4959]: I0128 15:43:50.953185 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-utilities" (OuterVolumeSpecName: "utilities") pod "031c373e-d7fb-42f5-865f-ed448fc83e7d" (UID: "031c373e-d7fb-42f5-865f-ed448fc83e7d"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:43:50 crc kubenswrapper[4959]: I0128 15:43:50.961773 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/031c373e-d7fb-42f5-865f-ed448fc83e7d-kube-api-access-cn8lf" (OuterVolumeSpecName: "kube-api-access-cn8lf") pod "031c373e-d7fb-42f5-865f-ed448fc83e7d" (UID: "031c373e-d7fb-42f5-865f-ed448fc83e7d"). InnerVolumeSpecName "kube-api-access-cn8lf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.019209 4959 generic.go:334] "Generic (PLEG): container finished" podID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerID="3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276" exitCode=0 Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.019280 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xmbsr" event={"ID":"031c373e-d7fb-42f5-865f-ed448fc83e7d","Type":"ContainerDied","Data":"3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276"} Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.019320 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xmbsr" event={"ID":"031c373e-d7fb-42f5-865f-ed448fc83e7d","Type":"ContainerDied","Data":"dad3f662cb3c2798d6e4294eeb1366985479458f201633c339359b0db5573413"} Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.019342 4959 scope.go:117] "RemoveContainer" containerID="3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.019614 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xmbsr" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.043427 4959 scope.go:117] "RemoveContainer" containerID="d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.054039 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cn8lf\" (UniqueName: \"kubernetes.io/projected/031c373e-d7fb-42f5-865f-ed448fc83e7d-kube-api-access-cn8lf\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.054175 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.072283 4959 scope.go:117] "RemoveContainer" containerID="5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.111184 4959 scope.go:117] "RemoveContainer" containerID="3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276" Jan 28 15:43:51 crc kubenswrapper[4959]: E0128 15:43:51.112155 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276\": container with ID starting with 3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276 not found: ID does not exist" containerID="3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.112240 4959 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276"} err="failed to get container status \"3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276\": rpc error: code = NotFound desc = could not find container \"3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276\": container with ID starting with 3776d37e2f8efb0b568d4eddfca7ff7c8fd2ff44fd542a30d027e52ae1d34276 not found: ID does not exist" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.112276 4959 scope.go:117] "RemoveContainer" containerID="d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4" Jan 28 15:43:51 crc kubenswrapper[4959]: E0128 15:43:51.112859 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4\": container with ID starting with d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4 not found: ID does not exist" containerID="d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.112930 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4"} err="failed to get container status \"d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4\": rpc error: code = NotFound desc = could not find container \"d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4\": container with ID starting with d9bc9b2b81725287eab8f9d5c5248439836df433097fb6f4a98fcbf6592532d4 not found: ID does not exist" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.112978 4959 scope.go:117] "RemoveContainer" containerID="5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134" Jan 28 15:43:51 crc kubenswrapper[4959]: E0128 15:43:51.113428 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134\": container with ID starting with 5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134 not found: ID does not exist" containerID="5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.113463 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134"} err="failed to get container status \"5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134\": rpc error: code = NotFound desc = could not find container \"5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134\": container with ID starting with 5b8103e0a5b81ce4da281c9bb66e1bfb30b0769f83bb631da664d819b2989134 not found: ID does not exist" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.405294 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "031c373e-d7fb-42f5-865f-ed448fc83e7d" (UID: "031c373e-d7fb-42f5-865f-ed448fc83e7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.461038 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/031c373e-d7fb-42f5-865f-ed448fc83e7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.658948 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xmbsr"] Jan 28 15:43:51 crc kubenswrapper[4959]: I0128 15:43:51.665788 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xmbsr"] Jan 28 15:43:52 crc kubenswrapper[4959]: I0128 15:43:52.601657 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" path="/var/lib/kubelet/pods/031c373e-d7fb-42f5-865f-ed448fc83e7d/volumes" Jan 28 15:43:58 crc kubenswrapper[4959]: I0128 15:43:58.689856 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:43:58 crc kubenswrapper[4959]: I0128 15:43:58.690747 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:43:58 crc kubenswrapper[4959]: I0128 15:43:58.690827 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:43:58 crc kubenswrapper[4959]: I0128 15:43:58.691832 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:43:58 crc kubenswrapper[4959]: I0128 15:43:58.691905 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" gracePeriod=600 Jan 28 15:43:58 crc kubenswrapper[4959]: E0128 15:43:58.821559 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:43:59 crc kubenswrapper[4959]: I0128 15:43:59.094143 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" exitCode=0 Jan 28 15:43:59 crc kubenswrapper[4959]: I0128 15:43:59.094141 4959 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228"} Jan 28 15:43:59 crc kubenswrapper[4959]: I0128 15:43:59.094229 4959 scope.go:117] "RemoveContainer" containerID="f37a96621a701b52e2c4c9fe1e3769cc979d96d65301975c4af562d003635b74" Jan 28 15:43:59 crc kubenswrapper[4959]: I0128 15:43:59.095034 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:43:59 crc kubenswrapper[4959]: E0128 15:43:59.095328 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:44:13 crc kubenswrapper[4959]: I0128 15:44:13.587505 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:44:13 crc kubenswrapper[4959]: E0128 15:44:13.589063 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:44:26 crc kubenswrapper[4959]: I0128 15:44:26.587234 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:44:26 crc kubenswrapper[4959]: E0128 15:44:26.588152 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:44:38 crc kubenswrapper[4959]: I0128 15:44:38.587307 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:44:38 crc kubenswrapper[4959]: E0128 15:44:38.588334 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:44:52 crc kubenswrapper[4959]: I0128 15:44:52.589832 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:44:52 crc kubenswrapper[4959]: E0128 15:44:52.591161 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 28 15:44:52 crc kubenswrapper[4959]: E0128 15:44:52.591161 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.160346 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b"]
Jan 28 15:45:00 crc kubenswrapper[4959]: E0128 15:45:00.161603 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerName="extract-content"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.161624 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerName="extract-content"
Jan 28 15:45:00 crc kubenswrapper[4959]: E0128 15:45:00.161666 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerName="registry-server"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.161675 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerName="registry-server"
Jan 28 15:45:00 crc kubenswrapper[4959]: E0128 15:45:00.161691 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" containerName="extract-content"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.161698 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" containerName="extract-content"
Jan 28 15:45:00 crc kubenswrapper[4959]: E0128 15:45:00.161712 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" containerName="registry-server"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.161721 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" containerName="registry-server"
Jan 28 15:45:00 crc kubenswrapper[4959]: E0128 15:45:00.161740 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerName="extract-utilities"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.161749 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerName="extract-utilities"
Jan 28 15:45:00 crc kubenswrapper[4959]: E0128 15:45:00.161770 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" containerName="extract-utilities"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.161777 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" containerName="extract-utilities"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.161976 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="031c373e-d7fb-42f5-865f-ed448fc83e7d" containerName="registry-server"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.162003 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f35b7f7-1898-41af-a80d-3306591ae849" containerName="registry-server"
Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.162791 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.167379 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.168541 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.184145 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b"] Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.229403 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxn24\" (UniqueName: \"kubernetes.io/projected/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-kube-api-access-fxn24\") pod \"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.229887 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-secret-volume\") pod \"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.230039 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-config-volume\") pod \"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.332508 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxn24\" (UniqueName: \"kubernetes.io/projected/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-kube-api-access-fxn24\") pod \"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.333256 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-secret-volume\") pod \"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.333372 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-config-volume\") pod \"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.334960 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-config-volume\") pod 
\"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.341641 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-secret-volume\") pod \"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.352602 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxn24\" (UniqueName: \"kubernetes.io/projected/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-kube-api-access-fxn24\") pod \"collect-profiles-29493585-6gr9b\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.494917 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.502194 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:00 crc kubenswrapper[4959]: I0128 15:45:00.990830 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b"] Jan 28 15:45:01 crc kubenswrapper[4959]: I0128 15:45:01.680745 4959 generic.go:334] "Generic (PLEG): container finished" podID="2bd6e6aa-cbd1-435e-9715-11b578d1c83c" containerID="8c739b830033178e9b4170b1d9b58e4cc72f21b8c58a385de44bf7af92120cc4" exitCode=0 Jan 28 15:45:01 crc kubenswrapper[4959]: I0128 15:45:01.681124 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" event={"ID":"2bd6e6aa-cbd1-435e-9715-11b578d1c83c","Type":"ContainerDied","Data":"8c739b830033178e9b4170b1d9b58e4cc72f21b8c58a385de44bf7af92120cc4"} Jan 28 15:45:01 crc kubenswrapper[4959]: I0128 15:45:01.681163 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" event={"ID":"2bd6e6aa-cbd1-435e-9715-11b578d1c83c","Type":"ContainerStarted","Data":"6c4a129e347287b3b9be464b3ea4e1b2a739f9f09d1f2702a616285a9ac130c7"} Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.023905 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.097822 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxn24\" (UniqueName: \"kubernetes.io/projected/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-kube-api-access-fxn24\") pod \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.097994 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-secret-volume\") pod \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.098215 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-config-volume\") pod \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\" (UID: \"2bd6e6aa-cbd1-435e-9715-11b578d1c83c\") " Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.099216 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-config-volume" (OuterVolumeSpecName: "config-volume") pod "2bd6e6aa-cbd1-435e-9715-11b578d1c83c" (UID: "2bd6e6aa-cbd1-435e-9715-11b578d1c83c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.107024 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-kube-api-access-fxn24" (OuterVolumeSpecName: "kube-api-access-fxn24") pod "2bd6e6aa-cbd1-435e-9715-11b578d1c83c" (UID: "2bd6e6aa-cbd1-435e-9715-11b578d1c83c"). InnerVolumeSpecName "kube-api-access-fxn24". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.108687 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2bd6e6aa-cbd1-435e-9715-11b578d1c83c" (UID: "2bd6e6aa-cbd1-435e-9715-11b578d1c83c"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.200995 4959 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.201045 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxn24\" (UniqueName: \"kubernetes.io/projected/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-kube-api-access-fxn24\") on node \"crc\" DevicePath \"\"" Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.201059 4959 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2bd6e6aa-cbd1-435e-9715-11b578d1c83c-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.702572 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" event={"ID":"2bd6e6aa-cbd1-435e-9715-11b578d1c83c","Type":"ContainerDied","Data":"6c4a129e347287b3b9be464b3ea4e1b2a739f9f09d1f2702a616285a9ac130c7"} Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.702620 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493585-6gr9b" Jan 28 15:45:03 crc kubenswrapper[4959]: I0128 15:45:03.702625 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c4a129e347287b3b9be464b3ea4e1b2a739f9f09d1f2702a616285a9ac130c7" Jan 28 15:45:07 crc kubenswrapper[4959]: I0128 15:45:07.587508 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:45:07 crc kubenswrapper[4959]: E0128 15:45:07.589434 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:45:19 crc kubenswrapper[4959]: I0128 15:45:19.587529 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:45:19 crc kubenswrapper[4959]: E0128 15:45:19.588678 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:45:33 crc kubenswrapper[4959]: I0128 15:45:33.587436 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:45:33 crc kubenswrapper[4959]: E0128 15:45:33.588460 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:45:44 crc kubenswrapper[4959]: I0128 15:45:44.587828 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:45:44 crc kubenswrapper[4959]: E0128 15:45:44.590466 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:45:55 crc kubenswrapper[4959]: I0128 15:45:55.587164 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:45:55 crc kubenswrapper[4959]: E0128 15:45:55.588311 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:46:10 crc kubenswrapper[4959]: I0128 15:46:10.594092 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:46:10 crc kubenswrapper[4959]: E0128 15:46:10.595222 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:46:24 crc kubenswrapper[4959]: I0128 15:46:24.588075 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:46:24 crc kubenswrapper[4959]: E0128 15:46:24.589572 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:46:35 crc kubenswrapper[4959]: I0128 15:46:35.587413 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:46:35 crc kubenswrapper[4959]: E0128 15:46:35.588560 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:46:42 crc kubenswrapper[4959]: I0128 15:46:42.052873 4959 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-r26g4"] Jan 28 15:46:42 crc kubenswrapper[4959]: I0128 15:46:42.072553 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-r26g4"] Jan 28 15:46:42 crc kubenswrapper[4959]: I0128 15:46:42.600166 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d2916a6-1c00-4636-b1a2-ac651646098f" path="/var/lib/kubelet/pods/0d2916a6-1c00-4636-b1a2-ac651646098f/volumes" Jan 28 15:46:44 crc kubenswrapper[4959]: I0128 15:46:44.032431 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-5cndm"] Jan 28 15:46:44 crc kubenswrapper[4959]: I0128 15:46:44.039654 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-5cndm"] Jan 28 15:46:44 crc kubenswrapper[4959]: I0128 15:46:44.599035 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5766c03-911f-42fc-9e74-83e62a42d6e4" path="/var/lib/kubelet/pods/b5766c03-911f-42fc-9e74-83e62a42d6e4/volumes" Jan 28 15:46:47 crc kubenswrapper[4959]: I0128 15:46:47.041489 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-4wc26"] Jan 28 15:46:47 crc kubenswrapper[4959]: I0128 15:46:47.049525 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-04e3-account-create-update-tqpxc"] Jan 28 15:46:47 crc kubenswrapper[4959]: I0128 15:46:47.057479 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-b8e2-account-create-update-zz4f6"] Jan 28 15:46:47 crc kubenswrapper[4959]: I0128 15:46:47.067050 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-04e3-account-create-update-tqpxc"] Jan 28 15:46:47 crc kubenswrapper[4959]: I0128 15:46:47.074701 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-b8e2-account-create-update-zz4f6"] Jan 28 15:46:47 crc kubenswrapper[4959]: I0128 15:46:47.083464 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-4wc26"] Jan 28 15:46:47 crc kubenswrapper[4959]: I0128 15:46:47.090795 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-5b25-account-create-update-csj55"] Jan 28 15:46:47 crc kubenswrapper[4959]: I0128 15:46:47.097231 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-5b25-account-create-update-csj55"] Jan 28 15:46:48 crc kubenswrapper[4959]: I0128 15:46:48.036092 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-2rc6d"] Jan 28 15:46:48 crc kubenswrapper[4959]: I0128 15:46:48.044344 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-2rc6d"] Jan 28 15:46:48 crc kubenswrapper[4959]: I0128 15:46:48.587414 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:46:48 crc kubenswrapper[4959]: E0128 15:46:48.587932 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:46:48 crc kubenswrapper[4959]: I0128 15:46:48.597266 4959 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="1f42737f-e000-4c42-8862-9b55c145364c" path="/var/lib/kubelet/pods/1f42737f-e000-4c42-8862-9b55c145364c/volumes" Jan 28 15:46:48 crc kubenswrapper[4959]: I0128 15:46:48.597972 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99812e98-c4ee-439d-aea7-71a1ac4f02fa" path="/var/lib/kubelet/pods/99812e98-c4ee-439d-aea7-71a1ac4f02fa/volumes" Jan 28 15:46:48 crc kubenswrapper[4959]: I0128 15:46:48.598667 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c24cb3a0-a66a-4419-8427-a357605ab17a" path="/var/lib/kubelet/pods/c24cb3a0-a66a-4419-8427-a357605ab17a/volumes" Jan 28 15:46:48 crc kubenswrapper[4959]: I0128 15:46:48.599451 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f055c78c-148d-47e9-847c-99dfe92b152a" path="/var/lib/kubelet/pods/f055c78c-148d-47e9-847c-99dfe92b152a/volumes" Jan 28 15:46:48 crc kubenswrapper[4959]: I0128 15:46:48.600772 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff101780-4112-48ff-a0f8-d5acff017705" path="/var/lib/kubelet/pods/ff101780-4112-48ff-a0f8-d5acff017705/volumes" Jan 28 15:47:03 crc kubenswrapper[4959]: I0128 15:47:03.588616 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:47:03 crc kubenswrapper[4959]: E0128 15:47:03.589772 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:47:16 crc kubenswrapper[4959]: I0128 15:47:16.447548 4959 scope.go:117] "RemoveContainer" containerID="add42293b08ef2fd4803e1b1caea8c3a26154d7ad13f0eb5a209c9ca77cb3082" Jan 28 15:47:16 crc kubenswrapper[4959]: I0128 15:47:16.474591 4959 scope.go:117] "RemoveContainer" containerID="8098bdf7afbc11fda62036cd2b83c7c5cad0334551293fc4331e7c92b9e4f24e" Jan 28 15:47:16 crc kubenswrapper[4959]: I0128 15:47:16.515303 4959 scope.go:117] "RemoveContainer" containerID="5bea39b1e9b6c3ef48152fd709f1d1108ebd5e0ddb6b1fd73e40728607716048" Jan 28 15:47:16 crc kubenswrapper[4959]: I0128 15:47:16.555820 4959 scope.go:117] "RemoveContainer" containerID="4c6a61d0140e6b48d64918bbac4d8a18e32a9c5f8e73799836bec3a5051ea866" Jan 28 15:47:16 crc kubenswrapper[4959]: I0128 15:47:16.583843 4959 scope.go:117] "RemoveContainer" containerID="325bec10a9e1cc2dfaf85077ee96bbe15416280e684927be4f208d49cbe7df1e" Jan 28 15:47:16 crc kubenswrapper[4959]: I0128 15:47:16.590452 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:47:16 crc kubenswrapper[4959]: E0128 15:47:16.590699 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:47:16 crc kubenswrapper[4959]: I0128 15:47:16.632394 4959 scope.go:117] "RemoveContainer" 
containerID="2c0d09e171cd56d136a483d745796cfc0e46d5482a4de6742b0aafe8d28dcd8c" Jan 28 15:47:16 crc kubenswrapper[4959]: I0128 15:47:16.675987 4959 scope.go:117] "RemoveContainer" containerID="edd7b1c2b8d6fa330502eb5ff2f585726244a751368dad20c1eb2cd4077ae069" Jan 28 15:47:21 crc kubenswrapper[4959]: I0128 15:47:21.072929 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-srp8f"] Jan 28 15:47:21 crc kubenswrapper[4959]: I0128 15:47:21.093066 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-9q2mr"] Jan 28 15:47:21 crc kubenswrapper[4959]: I0128 15:47:21.106067 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-srp8f"] Jan 28 15:47:21 crc kubenswrapper[4959]: I0128 15:47:21.115706 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-9q2mr"] Jan 28 15:47:22 crc kubenswrapper[4959]: I0128 15:47:22.600170 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f6a647b-8434-4a66-b8b3-ab8e3e7a006f" path="/var/lib/kubelet/pods/5f6a647b-8434-4a66-b8b3-ab8e3e7a006f/volumes" Jan 28 15:47:22 crc kubenswrapper[4959]: I0128 15:47:22.601249 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1ba29f6-1e73-4862-ab3f-8590465ff3d6" path="/var/lib/kubelet/pods/c1ba29f6-1e73-4862-ab3f-8590465ff3d6/volumes" Jan 28 15:47:23 crc kubenswrapper[4959]: I0128 15:47:23.032988 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-9zj97"] Jan 28 15:47:23 crc kubenswrapper[4959]: I0128 15:47:23.039576 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-9zj97"] Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.044446 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8467-account-create-update-tz2bg"] Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.052688 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-4f1d-account-create-update-mh64s"] Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.059490 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-024e-account-create-update-99mmk"] Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.066344 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8467-account-create-update-tz2bg"] Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.073978 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-4f1d-account-create-update-mh64s"] Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.081339 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-024e-account-create-update-99mmk"] Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.600328 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c55bb4f-197b-43bf-9f6b-917aa48aa16a" path="/var/lib/kubelet/pods/3c55bb4f-197b-43bf-9f6b-917aa48aa16a/volumes" Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.601558 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e92578e-1d79-49bd-9144-76a8450afc9a" path="/var/lib/kubelet/pods/6e92578e-1d79-49bd-9144-76a8450afc9a/volumes" Jan 28 15:47:24 crc kubenswrapper[4959]: I0128 15:47:24.602296 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aff13d88-7544-4442-ab55-4c426ccca467" path="/var/lib/kubelet/pods/aff13d88-7544-4442-ab55-4c426ccca467/volumes" Jan 28 15:47:24 crc 
kubenswrapper[4959]: I0128 15:47:24.603068 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d68b30f6-ed68-4a06-987d-38bb6853c206" path="/var/lib/kubelet/pods/d68b30f6-ed68-4a06-987d-38bb6853c206/volumes" Jan 28 15:47:27 crc kubenswrapper[4959]: I0128 15:47:27.588014 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:47:27 crc kubenswrapper[4959]: E0128 15:47:27.588989 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:47:39 crc kubenswrapper[4959]: I0128 15:47:39.587685 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:47:39 crc kubenswrapper[4959]: E0128 15:47:39.588774 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:47:45 crc kubenswrapper[4959]: I0128 15:47:45.081357 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-l6hzt"] Jan 28 15:47:45 crc kubenswrapper[4959]: I0128 15:47:45.091747 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-l6hzt"] Jan 28 15:47:46 crc kubenswrapper[4959]: I0128 15:47:46.598590 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ac41b31-1ffd-4f8c-b693-7505c6894794" path="/var/lib/kubelet/pods/2ac41b31-1ffd-4f8c-b693-7505c6894794/volumes" Jan 28 15:47:54 crc kubenswrapper[4959]: I0128 15:47:54.587772 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:47:54 crc kubenswrapper[4959]: E0128 15:47:54.588792 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:48:05 crc kubenswrapper[4959]: I0128 15:48:05.586697 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:48:05 crc kubenswrapper[4959]: E0128 15:48:05.587644 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:48:16 crc kubenswrapper[4959]: I0128 15:48:16.588438 4959 
scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:48:16 crc kubenswrapper[4959]: E0128 15:48:16.591144 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:48:16 crc kubenswrapper[4959]: I0128 15:48:16.834413 4959 scope.go:117] "RemoveContainer" containerID="e1e40a678bd8fab77f332b947a341a28c9c526e59a15240c7d1c340d27b6fb88" Jan 28 15:48:16 crc kubenswrapper[4959]: I0128 15:48:16.871136 4959 scope.go:117] "RemoveContainer" containerID="086d4316e9880557f8be658fb04ff2e7aa6d43ee8c792828891836814c477e6f" Jan 28 15:48:16 crc kubenswrapper[4959]: I0128 15:48:16.907223 4959 scope.go:117] "RemoveContainer" containerID="a71bf81e95dfab1bab7d87392e376f64df726b3d04f48c24e1c8f322448e11a8" Jan 28 15:48:16 crc kubenswrapper[4959]: I0128 15:48:16.949513 4959 scope.go:117] "RemoveContainer" containerID="aa3ab895593c8ec242aac0140ae28c95b4075609fbe3e4d828ef09986d6c0cc6" Jan 28 15:48:16 crc kubenswrapper[4959]: I0128 15:48:16.988647 4959 scope.go:117] "RemoveContainer" containerID="92761edae36e3513368ca65a4a9724a0761ec368d6f1f791bf5c558f35decd0b" Jan 28 15:48:17 crc kubenswrapper[4959]: I0128 15:48:17.074205 4959 scope.go:117] "RemoveContainer" containerID="7bc6515ab908db01f9c5cc2cef7ad2ef5e62283b5f60d897a94ec3238322c9b4" Jan 28 15:48:17 crc kubenswrapper[4959]: I0128 15:48:17.101880 4959 scope.go:117] "RemoveContainer" containerID="fea624a18b30980ef0c4be2fdc1f1fd21b5212ee9079e82fd370f5c06eda676c" Jan 28 15:48:30 crc kubenswrapper[4959]: I0128 15:48:30.593385 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:48:30 crc kubenswrapper[4959]: E0128 15:48:30.594279 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:48:43 crc kubenswrapper[4959]: I0128 15:48:43.587463 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:48:43 crc kubenswrapper[4959]: E0128 15:48:43.588545 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:48:55 crc kubenswrapper[4959]: I0128 15:48:55.587818 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:48:55 crc kubenswrapper[4959]: E0128 15:48:55.590517 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:49:10 crc kubenswrapper[4959]: I0128 15:49:10.593936 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:49:11 crc kubenswrapper[4959]: I0128 15:49:11.093985 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"22121dbcb9d9cee2eabd673d13848649bff874fee6e5f430ed34bfc092380d19"} Jan 28 15:51:28 crc kubenswrapper[4959]: I0128 15:51:28.689187 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:51:28 crc kubenswrapper[4959]: I0128 15:51:28.689893 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:51:58 crc kubenswrapper[4959]: I0128 15:51:58.689454 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:51:58 crc kubenswrapper[4959]: I0128 15:51:58.692144 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:52:28 crc kubenswrapper[4959]: I0128 15:52:28.689437 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:52:28 crc kubenswrapper[4959]: I0128 15:52:28.690241 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:52:28 crc kubenswrapper[4959]: I0128 15:52:28.690318 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:52:28 crc kubenswrapper[4959]: I0128 15:52:28.691294 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"22121dbcb9d9cee2eabd673d13848649bff874fee6e5f430ed34bfc092380d19"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:52:28 crc kubenswrapper[4959]: I0128 15:52:28.691370 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://22121dbcb9d9cee2eabd673d13848649bff874fee6e5f430ed34bfc092380d19" gracePeriod=600 Jan 28 15:52:28 crc kubenswrapper[4959]: I0128 15:52:28.876544 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="22121dbcb9d9cee2eabd673d13848649bff874fee6e5f430ed34bfc092380d19" exitCode=0 Jan 28 15:52:28 crc kubenswrapper[4959]: I0128 15:52:28.876890 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"22121dbcb9d9cee2eabd673d13848649bff874fee6e5f430ed34bfc092380d19"} Jan 28 15:52:28 crc kubenswrapper[4959]: I0128 15:52:28.876936 4959 scope.go:117] "RemoveContainer" containerID="5a117ea751f6b660c8f98e4704d1d233426a401b2b2441c0dc5ab32adf015228" Jan 28 15:52:29 crc kubenswrapper[4959]: I0128 15:52:29.887778 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969"} Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.465329 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nzdvq"] Jan 28 15:53:38 crc kubenswrapper[4959]: E0128 15:53:38.467706 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bd6e6aa-cbd1-435e-9715-11b578d1c83c" containerName="collect-profiles" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.467734 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bd6e6aa-cbd1-435e-9715-11b578d1c83c" containerName="collect-profiles" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.468210 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bd6e6aa-cbd1-435e-9715-11b578d1c83c" containerName="collect-profiles" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.471342 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.503947 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nzdvq"] Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.628403 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-utilities\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.628784 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh469\" (UniqueName: \"kubernetes.io/projected/d3bb20e5-f141-404b-9c1f-3d59dd55006c-kube-api-access-kh469\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.629042 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-catalog-content\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.730505 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh469\" (UniqueName: \"kubernetes.io/projected/d3bb20e5-f141-404b-9c1f-3d59dd55006c-kube-api-access-kh469\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.731073 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-catalog-content\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.731301 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-utilities\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.732141 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-utilities\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.732710 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-catalog-content\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.771972 4959 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kh469\" (UniqueName: \"kubernetes.io/projected/d3bb20e5-f141-404b-9c1f-3d59dd55006c-kube-api-access-kh469\") pod \"certified-operators-nzdvq\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:38 crc kubenswrapper[4959]: I0128 15:53:38.808814 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:39 crc kubenswrapper[4959]: I0128 15:53:39.308740 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nzdvq"] Jan 28 15:53:39 crc kubenswrapper[4959]: I0128 15:53:39.500817 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzdvq" event={"ID":"d3bb20e5-f141-404b-9c1f-3d59dd55006c","Type":"ContainerStarted","Data":"d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096"} Jan 28 15:53:39 crc kubenswrapper[4959]: I0128 15:53:39.500890 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzdvq" event={"ID":"d3bb20e5-f141-404b-9c1f-3d59dd55006c","Type":"ContainerStarted","Data":"2fb607de8f63f4a9977cbaf7f93994fd1fdc02eb77cb496e71063a3d2e5e7d64"} Jan 28 15:53:40 crc kubenswrapper[4959]: I0128 15:53:40.511227 4959 generic.go:334] "Generic (PLEG): container finished" podID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerID="d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096" exitCode=0 Jan 28 15:53:40 crc kubenswrapper[4959]: I0128 15:53:40.511319 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzdvq" event={"ID":"d3bb20e5-f141-404b-9c1f-3d59dd55006c","Type":"ContainerDied","Data":"d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096"} Jan 28 15:53:40 crc kubenswrapper[4959]: I0128 15:53:40.514914 4959 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 15:53:41 crc kubenswrapper[4959]: I0128 15:53:41.534341 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzdvq" event={"ID":"d3bb20e5-f141-404b-9c1f-3d59dd55006c","Type":"ContainerStarted","Data":"b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949"} Jan 28 15:53:42 crc kubenswrapper[4959]: I0128 15:53:42.546708 4959 generic.go:334] "Generic (PLEG): container finished" podID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerID="b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949" exitCode=0 Jan 28 15:53:42 crc kubenswrapper[4959]: I0128 15:53:42.546767 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzdvq" event={"ID":"d3bb20e5-f141-404b-9c1f-3d59dd55006c","Type":"ContainerDied","Data":"b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949"} Jan 28 15:53:43 crc kubenswrapper[4959]: I0128 15:53:43.557872 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzdvq" event={"ID":"d3bb20e5-f141-404b-9c1f-3d59dd55006c","Type":"ContainerStarted","Data":"0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb"} Jan 28 15:53:43 crc kubenswrapper[4959]: I0128 15:53:43.586363 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nzdvq" podStartSLOduration=3.177548733 podStartE2EDuration="5.586333697s" 
podCreationTimestamp="2026-01-28 15:53:38 +0000 UTC" firstStartedPulling="2026-01-28 15:53:40.514627549 +0000 UTC m=+2203.960533932" lastFinishedPulling="2026-01-28 15:53:42.923412513 +0000 UTC m=+2206.369318896" observedRunningTime="2026-01-28 15:53:43.581465037 +0000 UTC m=+2207.027371430" watchObservedRunningTime="2026-01-28 15:53:43.586333697 +0000 UTC m=+2207.032240080" Jan 28 15:53:48 crc kubenswrapper[4959]: I0128 15:53:48.810100 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:48 crc kubenswrapper[4959]: I0128 15:53:48.810564 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:48 crc kubenswrapper[4959]: I0128 15:53:48.855284 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:49 crc kubenswrapper[4959]: I0128 15:53:49.655974 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:49 crc kubenswrapper[4959]: I0128 15:53:49.711295 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nzdvq"] Jan 28 15:53:51 crc kubenswrapper[4959]: I0128 15:53:51.627502 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nzdvq" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerName="registry-server" containerID="cri-o://0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb" gracePeriod=2 Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.091758 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.172833 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-catalog-content\") pod \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.173016 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kh469\" (UniqueName: \"kubernetes.io/projected/d3bb20e5-f141-404b-9c1f-3d59dd55006c-kube-api-access-kh469\") pod \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.173062 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-utilities\") pod \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\" (UID: \"d3bb20e5-f141-404b-9c1f-3d59dd55006c\") " Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.174412 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-utilities" (OuterVolumeSpecName: "utilities") pod "d3bb20e5-f141-404b-9c1f-3d59dd55006c" (UID: "d3bb20e5-f141-404b-9c1f-3d59dd55006c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.180139 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3bb20e5-f141-404b-9c1f-3d59dd55006c-kube-api-access-kh469" (OuterVolumeSpecName: "kube-api-access-kh469") pod "d3bb20e5-f141-404b-9c1f-3d59dd55006c" (UID: "d3bb20e5-f141-404b-9c1f-3d59dd55006c"). InnerVolumeSpecName "kube-api-access-kh469". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.275451 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kh469\" (UniqueName: \"kubernetes.io/projected/d3bb20e5-f141-404b-9c1f-3d59dd55006c-kube-api-access-kh469\") on node \"crc\" DevicePath \"\"" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.275483 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.552586 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3bb20e5-f141-404b-9c1f-3d59dd55006c" (UID: "d3bb20e5-f141-404b-9c1f-3d59dd55006c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.580769 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3bb20e5-f141-404b-9c1f-3d59dd55006c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.639513 4959 generic.go:334] "Generic (PLEG): container finished" podID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerID="0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb" exitCode=0 Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.639568 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzdvq" event={"ID":"d3bb20e5-f141-404b-9c1f-3d59dd55006c","Type":"ContainerDied","Data":"0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb"} Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.639614 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzdvq" event={"ID":"d3bb20e5-f141-404b-9c1f-3d59dd55006c","Type":"ContainerDied","Data":"2fb607de8f63f4a9977cbaf7f93994fd1fdc02eb77cb496e71063a3d2e5e7d64"} Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.639633 4959 scope.go:117] "RemoveContainer" containerID="0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.641175 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nzdvq" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.662970 4959 scope.go:117] "RemoveContainer" containerID="b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.672396 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nzdvq"] Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.685699 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nzdvq"] Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.696258 4959 scope.go:117] "RemoveContainer" containerID="d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.728158 4959 scope.go:117] "RemoveContainer" containerID="0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb" Jan 28 15:53:52 crc kubenswrapper[4959]: E0128 15:53:52.728892 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb\": container with ID starting with 0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb not found: ID does not exist" containerID="0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.728942 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb"} err="failed to get container status \"0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb\": rpc error: code = NotFound desc = could not find container \"0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb\": container with ID starting with 0c86fdd22104c9a19994cec74579e65474724816895765d5dffad7fa11c38adb not found: ID does not exist" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.729014 4959 scope.go:117] "RemoveContainer" containerID="b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949" Jan 28 15:53:52 crc kubenswrapper[4959]: E0128 15:53:52.729458 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949\": container with ID starting with b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949 not found: ID does not exist" containerID="b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.729511 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949"} err="failed to get container status \"b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949\": rpc error: code = NotFound desc = could not find container \"b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949\": container with ID starting with b6893aa584986c93b65c5340b65106e9a04d9f4a974c9545109de1764e49c949 not found: ID does not exist" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.729551 4959 scope.go:117] "RemoveContainer" containerID="d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096" Jan 28 15:53:52 crc kubenswrapper[4959]: E0128 15:53:52.729968 4959 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096\": container with ID starting with d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096 not found: ID does not exist" containerID="d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096" Jan 28 15:53:52 crc kubenswrapper[4959]: I0128 15:53:52.730009 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096"} err="failed to get container status \"d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096\": rpc error: code = NotFound desc = could not find container \"d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096\": container with ID starting with d1785262ca68d4b4a0e6c3e32dbcff20678c6b24c76afa0a818fdc28148bb096 not found: ID does not exist" Jan 28 15:53:54 crc kubenswrapper[4959]: I0128 15:53:54.612419 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" path="/var/lib/kubelet/pods/d3bb20e5-f141-404b-9c1f-3d59dd55006c/volumes" Jan 28 15:54:28 crc kubenswrapper[4959]: I0128 15:54:28.689513 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:54:28 crc kubenswrapper[4959]: I0128 15:54:28.690087 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.886309 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g7mpk"] Jan 28 15:54:38 crc kubenswrapper[4959]: E0128 15:54:38.887242 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerName="extract-content" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.887257 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerName="extract-content" Jan 28 15:54:38 crc kubenswrapper[4959]: E0128 15:54:38.887267 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerName="extract-utilities" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.887274 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerName="extract-utilities" Jan 28 15:54:38 crc kubenswrapper[4959]: E0128 15:54:38.887302 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerName="registry-server" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.887308 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerName="registry-server" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.889882 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3bb20e5-f141-404b-9c1f-3d59dd55006c" containerName="registry-server" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 
15:54:38.893928 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.899889 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g7mpk"] Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.973223 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-utilities\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.973295 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmhms\" (UniqueName: \"kubernetes.io/projected/3c13e77c-2cb2-4b82-9001-602af5c0ec51-kube-api-access-kmhms\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:38 crc kubenswrapper[4959]: I0128 15:54:38.973421 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-catalog-content\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:39 crc kubenswrapper[4959]: I0128 15:54:39.075075 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-catalog-content\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:39 crc kubenswrapper[4959]: I0128 15:54:39.075229 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-utilities\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:39 crc kubenswrapper[4959]: I0128 15:54:39.075266 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmhms\" (UniqueName: \"kubernetes.io/projected/3c13e77c-2cb2-4b82-9001-602af5c0ec51-kube-api-access-kmhms\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:39 crc kubenswrapper[4959]: I0128 15:54:39.075961 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-catalog-content\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:39 crc kubenswrapper[4959]: I0128 15:54:39.076121 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-utilities\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:39 crc 
kubenswrapper[4959]: I0128 15:54:39.102323 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmhms\" (UniqueName: \"kubernetes.io/projected/3c13e77c-2cb2-4b82-9001-602af5c0ec51-kube-api-access-kmhms\") pod \"community-operators-g7mpk\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:39 crc kubenswrapper[4959]: I0128 15:54:39.223965 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:39 crc kubenswrapper[4959]: I0128 15:54:39.734252 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g7mpk"] Jan 28 15:54:40 crc kubenswrapper[4959]: I0128 15:54:40.061910 4959 generic.go:334] "Generic (PLEG): container finished" podID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerID="9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215" exitCode=0 Jan 28 15:54:40 crc kubenswrapper[4959]: I0128 15:54:40.061979 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7mpk" event={"ID":"3c13e77c-2cb2-4b82-9001-602af5c0ec51","Type":"ContainerDied","Data":"9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215"} Jan 28 15:54:40 crc kubenswrapper[4959]: I0128 15:54:40.062024 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7mpk" event={"ID":"3c13e77c-2cb2-4b82-9001-602af5c0ec51","Type":"ContainerStarted","Data":"c1826f28e4c7b27b0821679cd5d6bc1586966224afe914850afb4d6122bf5bb2"} Jan 28 15:54:41 crc kubenswrapper[4959]: I0128 15:54:41.071978 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7mpk" event={"ID":"3c13e77c-2cb2-4b82-9001-602af5c0ec51","Type":"ContainerStarted","Data":"6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e"} Jan 28 15:54:42 crc kubenswrapper[4959]: I0128 15:54:42.086004 4959 generic.go:334] "Generic (PLEG): container finished" podID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerID="6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e" exitCode=0 Jan 28 15:54:42 crc kubenswrapper[4959]: I0128 15:54:42.086086 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7mpk" event={"ID":"3c13e77c-2cb2-4b82-9001-602af5c0ec51","Type":"ContainerDied","Data":"6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e"} Jan 28 15:54:43 crc kubenswrapper[4959]: I0128 15:54:43.107165 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7mpk" event={"ID":"3c13e77c-2cb2-4b82-9001-602af5c0ec51","Type":"ContainerStarted","Data":"42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab"} Jan 28 15:54:49 crc kubenswrapper[4959]: I0128 15:54:49.224487 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:49 crc kubenswrapper[4959]: I0128 15:54:49.225081 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:49 crc kubenswrapper[4959]: I0128 15:54:49.268947 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:49 crc kubenswrapper[4959]: I0128 15:54:49.291876 4959 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/community-operators-g7mpk" podStartSLOduration=8.74674465 podStartE2EDuration="11.291855471s" podCreationTimestamp="2026-01-28 15:54:38 +0000 UTC" firstStartedPulling="2026-01-28 15:54:40.06428431 +0000 UTC m=+2263.510190693" lastFinishedPulling="2026-01-28 15:54:42.609395131 +0000 UTC m=+2266.055301514" observedRunningTime="2026-01-28 15:54:43.1486881 +0000 UTC m=+2266.594594483" watchObservedRunningTime="2026-01-28 15:54:49.291855471 +0000 UTC m=+2272.737761854" Jan 28 15:54:49 crc kubenswrapper[4959]: I0128 15:54:49.355794 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:49 crc kubenswrapper[4959]: I0128 15:54:49.508698 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g7mpk"] Jan 28 15:54:51 crc kubenswrapper[4959]: I0128 15:54:51.327732 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g7mpk" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerName="registry-server" containerID="cri-o://42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab" gracePeriod=2 Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.279947 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.351747 4959 generic.go:334] "Generic (PLEG): container finished" podID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerID="42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab" exitCode=0 Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.351815 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7mpk" event={"ID":"3c13e77c-2cb2-4b82-9001-602af5c0ec51","Type":"ContainerDied","Data":"42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab"} Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.351889 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g7mpk" event={"ID":"3c13e77c-2cb2-4b82-9001-602af5c0ec51","Type":"ContainerDied","Data":"c1826f28e4c7b27b0821679cd5d6bc1586966224afe914850afb4d6122bf5bb2"} Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.351932 4959 scope.go:117] "RemoveContainer" containerID="42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.351974 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g7mpk" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.379906 4959 scope.go:117] "RemoveContainer" containerID="6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.402266 4959 scope.go:117] "RemoveContainer" containerID="9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.438734 4959 scope.go:117] "RemoveContainer" containerID="42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab" Jan 28 15:54:52 crc kubenswrapper[4959]: E0128 15:54:52.439210 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab\": container with ID starting with 42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab not found: ID does not exist" containerID="42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.439255 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab"} err="failed to get container status \"42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab\": rpc error: code = NotFound desc = could not find container \"42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab\": container with ID starting with 42fcd09d6df12fb465212a5b8e664e110fb0dbcbda097f1649114b4dcaa206ab not found: ID does not exist" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.439285 4959 scope.go:117] "RemoveContainer" containerID="6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e" Jan 28 15:54:52 crc kubenswrapper[4959]: E0128 15:54:52.439647 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e\": container with ID starting with 6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e not found: ID does not exist" containerID="6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.439682 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e"} err="failed to get container status \"6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e\": rpc error: code = NotFound desc = could not find container \"6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e\": container with ID starting with 6d61ef68c3771cfe8ceeb1c9b46b6f2432ca300221691b8b1efb9178760a710e not found: ID does not exist" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.439708 4959 scope.go:117] "RemoveContainer" containerID="9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215" Jan 28 15:54:52 crc kubenswrapper[4959]: E0128 15:54:52.439905 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215\": container with ID starting with 9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215 not found: ID does not exist" containerID="9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215" 
Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.439927 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215"} err="failed to get container status \"9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215\": rpc error: code = NotFound desc = could not find container \"9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215\": container with ID starting with 9cda4a830e498ca4307196318269c5bd554e20ccc53b9753af43ef66545b3215 not found: ID does not exist" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.480230 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-utilities\") pod \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.480304 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-catalog-content\") pod \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.480353 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmhms\" (UniqueName: \"kubernetes.io/projected/3c13e77c-2cb2-4b82-9001-602af5c0ec51-kube-api-access-kmhms\") pod \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\" (UID: \"3c13e77c-2cb2-4b82-9001-602af5c0ec51\") " Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.481593 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-utilities" (OuterVolumeSpecName: "utilities") pod "3c13e77c-2cb2-4b82-9001-602af5c0ec51" (UID: "3c13e77c-2cb2-4b82-9001-602af5c0ec51"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.481817 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.486328 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c13e77c-2cb2-4b82-9001-602af5c0ec51-kube-api-access-kmhms" (OuterVolumeSpecName: "kube-api-access-kmhms") pod "3c13e77c-2cb2-4b82-9001-602af5c0ec51" (UID: "3c13e77c-2cb2-4b82-9001-602af5c0ec51"). InnerVolumeSpecName "kube-api-access-kmhms". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.538136 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3c13e77c-2cb2-4b82-9001-602af5c0ec51" (UID: "3c13e77c-2cb2-4b82-9001-602af5c0ec51"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.582906 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c13e77c-2cb2-4b82-9001-602af5c0ec51-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.582959 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmhms\" (UniqueName: \"kubernetes.io/projected/3c13e77c-2cb2-4b82-9001-602af5c0ec51-kube-api-access-kmhms\") on node \"crc\" DevicePath \"\"" Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.679225 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g7mpk"] Jan 28 15:54:52 crc kubenswrapper[4959]: I0128 15:54:52.687082 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g7mpk"] Jan 28 15:54:54 crc kubenswrapper[4959]: I0128 15:54:54.598805 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" path="/var/lib/kubelet/pods/3c13e77c-2cb2-4b82-9001-602af5c0ec51/volumes" Jan 28 15:54:58 crc kubenswrapper[4959]: I0128 15:54:58.689350 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:54:58 crc kubenswrapper[4959]: I0128 15:54:58.689695 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.371397 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p8mzs"] Jan 28 15:55:19 crc kubenswrapper[4959]: E0128 15:55:19.372582 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerName="registry-server" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.372600 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerName="registry-server" Jan 28 15:55:19 crc kubenswrapper[4959]: E0128 15:55:19.372625 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerName="extract-content" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.372633 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerName="extract-content" Jan 28 15:55:19 crc kubenswrapper[4959]: E0128 15:55:19.372664 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerName="extract-utilities" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.372676 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerName="extract-utilities" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.372912 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c13e77c-2cb2-4b82-9001-602af5c0ec51" containerName="registry-server" Jan 28 15:55:19 crc 
kubenswrapper[4959]: I0128 15:55:19.377396 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.383244 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p8mzs"] Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.384023 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-catalog-content\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.384185 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-utilities\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.384239 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tht87\" (UniqueName: \"kubernetes.io/projected/55295335-b9d0-40fc-84bd-0ead626e2095-kube-api-access-tht87\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.485546 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-utilities\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.485617 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tht87\" (UniqueName: \"kubernetes.io/projected/55295335-b9d0-40fc-84bd-0ead626e2095-kube-api-access-tht87\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.485693 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-catalog-content\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.486175 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-utilities\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.486270 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-catalog-content\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc 
kubenswrapper[4959]: I0128 15:55:19.511648 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tht87\" (UniqueName: \"kubernetes.io/projected/55295335-b9d0-40fc-84bd-0ead626e2095-kube-api-access-tht87\") pod \"redhat-marketplace-p8mzs\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:19 crc kubenswrapper[4959]: I0128 15:55:19.707076 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:20 crc kubenswrapper[4959]: I0128 15:55:20.202666 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p8mzs"] Jan 28 15:55:20 crc kubenswrapper[4959]: I0128 15:55:20.600673 4959 generic.go:334] "Generic (PLEG): container finished" podID="55295335-b9d0-40fc-84bd-0ead626e2095" containerID="59721304ed96ba86893b3193b0cf7046b23342ed0e3b8e8fecad9e4d4a39e826" exitCode=0 Jan 28 15:55:20 crc kubenswrapper[4959]: I0128 15:55:20.603509 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p8mzs" event={"ID":"55295335-b9d0-40fc-84bd-0ead626e2095","Type":"ContainerDied","Data":"59721304ed96ba86893b3193b0cf7046b23342ed0e3b8e8fecad9e4d4a39e826"} Jan 28 15:55:20 crc kubenswrapper[4959]: I0128 15:55:20.603549 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p8mzs" event={"ID":"55295335-b9d0-40fc-84bd-0ead626e2095","Type":"ContainerStarted","Data":"17a8e7694066a34ae48c33090a3acb510389225beb31df40023e21cf8fa0dfcf"} Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.609067 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p8mzs" event={"ID":"55295335-b9d0-40fc-84bd-0ead626e2095","Type":"ContainerStarted","Data":"2dc30083b2a4d9b3ef444d34a890105b5b7696adc807a84f5d164a1cf7300d1e"} Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.779415 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5mbdw"] Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.782566 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.792547 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5mbdw"] Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.821882 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-699r4\" (UniqueName: \"kubernetes.io/projected/7c497c7c-b2c4-4162-bd50-410dfbc61e46-kube-api-access-699r4\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.821984 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c497c7c-b2c4-4162-bd50-410dfbc61e46-utilities\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.822196 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c497c7c-b2c4-4162-bd50-410dfbc61e46-catalog-content\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.923659 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c497c7c-b2c4-4162-bd50-410dfbc61e46-catalog-content\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.923777 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-699r4\" (UniqueName: \"kubernetes.io/projected/7c497c7c-b2c4-4162-bd50-410dfbc61e46-kube-api-access-699r4\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.923818 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c497c7c-b2c4-4162-bd50-410dfbc61e46-utilities\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.924427 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c497c7c-b2c4-4162-bd50-410dfbc61e46-catalog-content\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.924466 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c497c7c-b2c4-4162-bd50-410dfbc61e46-utilities\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:21 crc kubenswrapper[4959]: I0128 15:55:21.945672 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-699r4\" (UniqueName: \"kubernetes.io/projected/7c497c7c-b2c4-4162-bd50-410dfbc61e46-kube-api-access-699r4\") pod \"redhat-operators-5mbdw\" (UID: \"7c497c7c-b2c4-4162-bd50-410dfbc61e46\") " pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:22 crc kubenswrapper[4959]: I0128 15:55:22.102909 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:22 crc kubenswrapper[4959]: I0128 15:55:22.554143 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5mbdw"] Jan 28 15:55:22 crc kubenswrapper[4959]: I0128 15:55:22.629142 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5mbdw" event={"ID":"7c497c7c-b2c4-4162-bd50-410dfbc61e46","Type":"ContainerStarted","Data":"1dcd3d5b6ea2d16e4135802af182dec2a9914a462033777d83d30ad80737f413"} Jan 28 15:55:22 crc kubenswrapper[4959]: I0128 15:55:22.633934 4959 generic.go:334] "Generic (PLEG): container finished" podID="55295335-b9d0-40fc-84bd-0ead626e2095" containerID="2dc30083b2a4d9b3ef444d34a890105b5b7696adc807a84f5d164a1cf7300d1e" exitCode=0 Jan 28 15:55:22 crc kubenswrapper[4959]: I0128 15:55:22.633968 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p8mzs" event={"ID":"55295335-b9d0-40fc-84bd-0ead626e2095","Type":"ContainerDied","Data":"2dc30083b2a4d9b3ef444d34a890105b5b7696adc807a84f5d164a1cf7300d1e"} Jan 28 15:55:23 crc kubenswrapper[4959]: I0128 15:55:23.642715 4959 generic.go:334] "Generic (PLEG): container finished" podID="7c497c7c-b2c4-4162-bd50-410dfbc61e46" containerID="f2918c44ed0554a7a19774dac3f2f1b401ec0494a52fa91feda9aab9152b45e5" exitCode=0 Jan 28 15:55:23 crc kubenswrapper[4959]: I0128 15:55:23.642790 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5mbdw" event={"ID":"7c497c7c-b2c4-4162-bd50-410dfbc61e46","Type":"ContainerDied","Data":"f2918c44ed0554a7a19774dac3f2f1b401ec0494a52fa91feda9aab9152b45e5"} Jan 28 15:55:23 crc kubenswrapper[4959]: I0128 15:55:23.647401 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p8mzs" event={"ID":"55295335-b9d0-40fc-84bd-0ead626e2095","Type":"ContainerStarted","Data":"c26cb580aed16c10b120d238850ce97627c3bd79bed01b7bb67e2ef2ce49ac40"} Jan 28 15:55:28 crc kubenswrapper[4959]: I0128 15:55:28.689476 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 15:55:28 crc kubenswrapper[4959]: I0128 15:55:28.690161 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 15:55:28 crc kubenswrapper[4959]: I0128 15:55:28.690210 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 15:55:28 crc kubenswrapper[4959]: I0128 15:55:28.691015 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 15:55:28 crc kubenswrapper[4959]: I0128 15:55:28.691084 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" gracePeriod=600 Jan 28 15:55:29 crc kubenswrapper[4959]: I0128 15:55:29.705264 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" exitCode=0 Jan 28 15:55:29 crc kubenswrapper[4959]: I0128 15:55:29.705308 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969"} Jan 28 15:55:29 crc kubenswrapper[4959]: I0128 15:55:29.705349 4959 scope.go:117] "RemoveContainer" containerID="22121dbcb9d9cee2eabd673d13848649bff874fee6e5f430ed34bfc092380d19" Jan 28 15:55:29 crc kubenswrapper[4959]: I0128 15:55:29.707652 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:29 crc kubenswrapper[4959]: I0128 15:55:29.708547 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:29 crc kubenswrapper[4959]: I0128 15:55:29.757022 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:29 crc kubenswrapper[4959]: I0128 15:55:29.776038 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p8mzs" podStartSLOduration=8.320421483 podStartE2EDuration="10.776017407s" podCreationTimestamp="2026-01-28 15:55:19 +0000 UTC" firstStartedPulling="2026-01-28 15:55:20.603724293 +0000 UTC m=+2304.049630676" lastFinishedPulling="2026-01-28 15:55:23.059320217 +0000 UTC m=+2306.505226600" observedRunningTime="2026-01-28 15:55:23.699545043 +0000 UTC m=+2307.145451446" watchObservedRunningTime="2026-01-28 15:55:29.776017407 +0000 UTC m=+2313.221923790" Jan 28 15:55:30 crc kubenswrapper[4959]: I0128 15:55:30.772040 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:30 crc kubenswrapper[4959]: I0128 15:55:30.825294 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p8mzs"] Jan 28 15:55:32 crc kubenswrapper[4959]: E0128 15:55:32.500705 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:55:32 crc kubenswrapper[4959]: I0128 15:55:32.728020 4959 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p8mzs" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" containerName="registry-server" containerID="cri-o://c26cb580aed16c10b120d238850ce97627c3bd79bed01b7bb67e2ef2ce49ac40" gracePeriod=2 Jan 28 15:55:32 crc kubenswrapper[4959]: I0128 15:55:32.728728 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:55:32 crc kubenswrapper[4959]: E0128 15:55:32.729148 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:55:33 crc kubenswrapper[4959]: I0128 15:55:33.807261 4959 generic.go:334] "Generic (PLEG): container finished" podID="55295335-b9d0-40fc-84bd-0ead626e2095" containerID="c26cb580aed16c10b120d238850ce97627c3bd79bed01b7bb67e2ef2ce49ac40" exitCode=0 Jan 28 15:55:33 crc kubenswrapper[4959]: I0128 15:55:33.807407 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p8mzs" event={"ID":"55295335-b9d0-40fc-84bd-0ead626e2095","Type":"ContainerDied","Data":"c26cb580aed16c10b120d238850ce97627c3bd79bed01b7bb67e2ef2ce49ac40"} Jan 28 15:55:33 crc kubenswrapper[4959]: I0128 15:55:33.943747 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.066005 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tht87\" (UniqueName: \"kubernetes.io/projected/55295335-b9d0-40fc-84bd-0ead626e2095-kube-api-access-tht87\") pod \"55295335-b9d0-40fc-84bd-0ead626e2095\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.066052 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-utilities\") pod \"55295335-b9d0-40fc-84bd-0ead626e2095\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.066117 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-catalog-content\") pod \"55295335-b9d0-40fc-84bd-0ead626e2095\" (UID: \"55295335-b9d0-40fc-84bd-0ead626e2095\") " Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.067347 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-utilities" (OuterVolumeSpecName: "utilities") pod "55295335-b9d0-40fc-84bd-0ead626e2095" (UID: "55295335-b9d0-40fc-84bd-0ead626e2095"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.079402 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55295335-b9d0-40fc-84bd-0ead626e2095-kube-api-access-tht87" (OuterVolumeSpecName: "kube-api-access-tht87") pod "55295335-b9d0-40fc-84bd-0ead626e2095" (UID: "55295335-b9d0-40fc-84bd-0ead626e2095"). InnerVolumeSpecName "kube-api-access-tht87". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.088568 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55295335-b9d0-40fc-84bd-0ead626e2095" (UID: "55295335-b9d0-40fc-84bd-0ead626e2095"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.168515 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tht87\" (UniqueName: \"kubernetes.io/projected/55295335-b9d0-40fc-84bd-0ead626e2095-kube-api-access-tht87\") on node \"crc\" DevicePath \"\"" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.168561 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.168570 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55295335-b9d0-40fc-84bd-0ead626e2095-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.815472 4959 generic.go:334] "Generic (PLEG): container finished" podID="7c497c7c-b2c4-4162-bd50-410dfbc61e46" containerID="3aa1dd97c6d38b7c3d260acccded943ad0055916b2e151ad9a885d457f7beede" exitCode=0 Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.815568 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5mbdw" event={"ID":"7c497c7c-b2c4-4162-bd50-410dfbc61e46","Type":"ContainerDied","Data":"3aa1dd97c6d38b7c3d260acccded943ad0055916b2e151ad9a885d457f7beede"} Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.818214 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p8mzs" event={"ID":"55295335-b9d0-40fc-84bd-0ead626e2095","Type":"ContainerDied","Data":"17a8e7694066a34ae48c33090a3acb510389225beb31df40023e21cf8fa0dfcf"} Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.818268 4959 scope.go:117] "RemoveContainer" containerID="c26cb580aed16c10b120d238850ce97627c3bd79bed01b7bb67e2ef2ce49ac40" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.818351 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p8mzs" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.838630 4959 scope.go:117] "RemoveContainer" containerID="2dc30083b2a4d9b3ef444d34a890105b5b7696adc807a84f5d164a1cf7300d1e" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.865806 4959 scope.go:117] "RemoveContainer" containerID="59721304ed96ba86893b3193b0cf7046b23342ed0e3b8e8fecad9e4d4a39e826" Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.872132 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p8mzs"] Jan 28 15:55:34 crc kubenswrapper[4959]: I0128 15:55:34.879401 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p8mzs"] Jan 28 15:55:36 crc kubenswrapper[4959]: I0128 15:55:36.600844 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" path="/var/lib/kubelet/pods/55295335-b9d0-40fc-84bd-0ead626e2095/volumes" Jan 28 15:55:42 crc kubenswrapper[4959]: I0128 15:55:42.890355 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5mbdw" event={"ID":"7c497c7c-b2c4-4162-bd50-410dfbc61e46","Type":"ContainerStarted","Data":"bb29ed8672053ae22abbf1a50769de5e99584e35515ace3d7190ada984dd0393"} Jan 28 15:55:42 crc kubenswrapper[4959]: I0128 15:55:42.912006 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5mbdw" podStartSLOduration=3.7678693770000002 podStartE2EDuration="21.911987411s" podCreationTimestamp="2026-01-28 15:55:21 +0000 UTC" firstStartedPulling="2026-01-28 15:55:23.645853936 +0000 UTC m=+2307.091760319" lastFinishedPulling="2026-01-28 15:55:41.78997197 +0000 UTC m=+2325.235878353" observedRunningTime="2026-01-28 15:55:42.908830324 +0000 UTC m=+2326.354736737" watchObservedRunningTime="2026-01-28 15:55:42.911987411 +0000 UTC m=+2326.357893794" Jan 28 15:55:44 crc kubenswrapper[4959]: I0128 15:55:44.590600 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:55:44 crc kubenswrapper[4959]: E0128 15:55:44.591228 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:55:52 crc kubenswrapper[4959]: I0128 15:55:52.103699 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:52 crc kubenswrapper[4959]: I0128 15:55:52.104348 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:52 crc kubenswrapper[4959]: I0128 15:55:52.158919 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:53 crc kubenswrapper[4959]: I0128 15:55:53.022733 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5mbdw" Jan 28 15:55:53 crc kubenswrapper[4959]: I0128 15:55:53.110825 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-marketplace/redhat-operators-5mbdw"] Jan 28 15:55:53 crc kubenswrapper[4959]: I0128 15:55:53.159428 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fp6bt"] Jan 28 15:55:53 crc kubenswrapper[4959]: I0128 15:55:53.159836 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fp6bt" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="registry-server" containerID="cri-o://41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72" gracePeriod=2 Jan 28 15:55:53 crc kubenswrapper[4959]: E0128 15:55:53.709532 4959 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72 is running failed: container process not found" containerID="41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 15:55:53 crc kubenswrapper[4959]: E0128 15:55:53.710280 4959 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72 is running failed: container process not found" containerID="41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 15:55:53 crc kubenswrapper[4959]: E0128 15:55:53.710760 4959 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72 is running failed: container process not found" containerID="41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72" cmd=["grpc_health_probe","-addr=:50051"] Jan 28 15:55:53 crc kubenswrapper[4959]: E0128 15:55:53.710804 4959 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-fp6bt" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="registry-server" Jan 28 15:55:53 crc kubenswrapper[4959]: I0128 15:55:53.993287 4959 generic.go:334] "Generic (PLEG): container finished" podID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerID="41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72" exitCode=0 Jan 28 15:55:53 crc kubenswrapper[4959]: I0128 15:55:53.994663 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fp6bt" event={"ID":"5b88ba92-b8b0-4949-9885-c22425ad27be","Type":"ContainerDied","Data":"41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72"} Jan 28 15:55:53 crc kubenswrapper[4959]: I0128 15:55:53.994714 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fp6bt" event={"ID":"5b88ba92-b8b0-4949-9885-c22425ad27be","Type":"ContainerDied","Data":"e27bb16abab3ea418f7db47d8393e8bcc6ac25b7d2b1f23d062c8eca0c9d45ce"} Jan 28 15:55:53 crc kubenswrapper[4959]: I0128 15:55:53.994725 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e27bb16abab3ea418f7db47d8393e8bcc6ac25b7d2b1f23d062c8eca0c9d45ce" Jan 28 15:55:54 crc kubenswrapper[4959]: 
I0128 15:55:54.031775 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.159303 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-utilities\") pod \"5b88ba92-b8b0-4949-9885-c22425ad27be\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.159350 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-catalog-content\") pod \"5b88ba92-b8b0-4949-9885-c22425ad27be\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.159373 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7tcq\" (UniqueName: \"kubernetes.io/projected/5b88ba92-b8b0-4949-9885-c22425ad27be-kube-api-access-w7tcq\") pod \"5b88ba92-b8b0-4949-9885-c22425ad27be\" (UID: \"5b88ba92-b8b0-4949-9885-c22425ad27be\") " Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.160003 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-utilities" (OuterVolumeSpecName: "utilities") pod "5b88ba92-b8b0-4949-9885-c22425ad27be" (UID: "5b88ba92-b8b0-4949-9885-c22425ad27be"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.178605 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88ba92-b8b0-4949-9885-c22425ad27be-kube-api-access-w7tcq" (OuterVolumeSpecName: "kube-api-access-w7tcq") pod "5b88ba92-b8b0-4949-9885-c22425ad27be" (UID: "5b88ba92-b8b0-4949-9885-c22425ad27be"). InnerVolumeSpecName "kube-api-access-w7tcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.261994 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.262051 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7tcq\" (UniqueName: \"kubernetes.io/projected/5b88ba92-b8b0-4949-9885-c22425ad27be-kube-api-access-w7tcq\") on node \"crc\" DevicePath \"\"" Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.278672 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b88ba92-b8b0-4949-9885-c22425ad27be" (UID: "5b88ba92-b8b0-4949-9885-c22425ad27be"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.364081 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b88ba92-b8b0-4949-9885-c22425ad27be-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 15:55:54 crc kubenswrapper[4959]: E0128 15:55:54.774702 4959 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b88ba92_b8b0_4949_9885_c22425ad27be.slice/crio-e27bb16abab3ea418f7db47d8393e8bcc6ac25b7d2b1f23d062c8eca0c9d45ce\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b88ba92_b8b0_4949_9885_c22425ad27be.slice\": RecentStats: unable to find data in memory cache]" Jan 28 15:55:54 crc kubenswrapper[4959]: I0128 15:55:54.999537 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fp6bt" Jan 28 15:55:55 crc kubenswrapper[4959]: I0128 15:55:55.020988 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fp6bt"] Jan 28 15:55:55 crc kubenswrapper[4959]: I0128 15:55:55.029135 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fp6bt"] Jan 28 15:55:56 crc kubenswrapper[4959]: I0128 15:55:56.598485 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" path="/var/lib/kubelet/pods/5b88ba92-b8b0-4949-9885-c22425ad27be/volumes" Jan 28 15:55:58 crc kubenswrapper[4959]: I0128 15:55:58.587520 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:55:58 crc kubenswrapper[4959]: E0128 15:55:58.588177 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:56:10 crc kubenswrapper[4959]: I0128 15:56:10.591266 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:56:10 crc kubenswrapper[4959]: E0128 15:56:10.592128 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:56:17 crc kubenswrapper[4959]: I0128 15:56:17.387912 4959 scope.go:117] "RemoveContainer" containerID="41c5e8a8e043425ee7dc877c2db75e9e1321ce2b4653c418482c600f07b9dd72" Jan 28 15:56:17 crc kubenswrapper[4959]: I0128 15:56:17.413389 4959 scope.go:117] "RemoveContainer" containerID="a1522d18a37a405ce06effb85c30065618ffda51d08bf160be2e97fba025d388" Jan 28 15:56:17 crc kubenswrapper[4959]: I0128 15:56:17.440713 4959 scope.go:117] "RemoveContainer" 
containerID="d8f3b83f4780b51907d71c992da67a414034ac4926c619784c6ff8849f0f7919" Jan 28 15:56:25 crc kubenswrapper[4959]: I0128 15:56:25.587447 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:56:25 crc kubenswrapper[4959]: E0128 15:56:25.588553 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:56:36 crc kubenswrapper[4959]: I0128 15:56:36.587993 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:56:36 crc kubenswrapper[4959]: E0128 15:56:36.588672 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:56:47 crc kubenswrapper[4959]: I0128 15:56:47.587674 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:56:47 crc kubenswrapper[4959]: E0128 15:56:47.588524 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:57:02 crc kubenswrapper[4959]: I0128 15:57:02.589138 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:57:02 crc kubenswrapper[4959]: E0128 15:57:02.590000 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:57:15 crc kubenswrapper[4959]: I0128 15:57:15.587643 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:57:15 crc kubenswrapper[4959]: E0128 15:57:15.588423 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:57:26 crc kubenswrapper[4959]: I0128 15:57:26.586828 4959 scope.go:117] "RemoveContainer" 
containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:57:26 crc kubenswrapper[4959]: E0128 15:57:26.587530 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:57:39 crc kubenswrapper[4959]: I0128 15:57:39.587626 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:57:39 crc kubenswrapper[4959]: E0128 15:57:39.588463 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:57:51 crc kubenswrapper[4959]: I0128 15:57:51.587389 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:57:51 crc kubenswrapper[4959]: E0128 15:57:51.588338 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:58:06 crc kubenswrapper[4959]: I0128 15:58:06.586922 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:58:06 crc kubenswrapper[4959]: E0128 15:58:06.587727 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:58:20 crc kubenswrapper[4959]: I0128 15:58:20.593225 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:58:20 crc kubenswrapper[4959]: E0128 15:58:20.594178 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:58:35 crc kubenswrapper[4959]: I0128 15:58:35.588026 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:58:35 crc kubenswrapper[4959]: E0128 15:58:35.588826 4959 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:58:47 crc kubenswrapper[4959]: I0128 15:58:47.587478 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:58:47 crc kubenswrapper[4959]: E0128 15:58:47.588443 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:58:58 crc kubenswrapper[4959]: I0128 15:58:58.586971 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:58:58 crc kubenswrapper[4959]: E0128 15:58:58.587789 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:59:10 crc kubenswrapper[4959]: I0128 15:59:10.591001 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:59:10 crc kubenswrapper[4959]: E0128 15:59:10.591658 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:59:24 crc kubenswrapper[4959]: I0128 15:59:24.586841 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:59:24 crc kubenswrapper[4959]: E0128 15:59:24.587502 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:59:39 crc kubenswrapper[4959]: I0128 15:59:39.587593 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:59:39 crc kubenswrapper[4959]: E0128 15:59:39.588538 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 15:59:53 crc kubenswrapper[4959]: I0128 15:59:53.587574 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 15:59:53 crc kubenswrapper[4959]: E0128 15:59:53.588267 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.164570 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb"] Jan 28 16:00:00 crc kubenswrapper[4959]: E0128 16:00:00.166958 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="registry-server" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.167094 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="registry-server" Jan 28 16:00:00 crc kubenswrapper[4959]: E0128 16:00:00.167206 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" containerName="extract-utilities" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.167276 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" containerName="extract-utilities" Jan 28 16:00:00 crc kubenswrapper[4959]: E0128 16:00:00.167364 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="extract-utilities" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.167450 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="extract-utilities" Jan 28 16:00:00 crc kubenswrapper[4959]: E0128 16:00:00.167514 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" containerName="extract-content" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.167577 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" containerName="extract-content" Jan 28 16:00:00 crc kubenswrapper[4959]: E0128 16:00:00.167658 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" containerName="registry-server" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.167734 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" containerName="registry-server" Jan 28 16:00:00 crc kubenswrapper[4959]: E0128 16:00:00.167815 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="extract-content" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.167888 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="extract-content" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.168194 4959 
memory_manager.go:354] "RemoveStaleState removing state" podUID="5b88ba92-b8b0-4949-9885-c22425ad27be" containerName="registry-server" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.168305 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="55295335-b9d0-40fc-84bd-0ead626e2095" containerName="registry-server" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.169011 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.173311 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.173578 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.208194 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb"] Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.285690 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m9kw\" (UniqueName: \"kubernetes.io/projected/18f0b867-131c-4f6e-a492-f168e974e117-kube-api-access-6m9kw\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.285759 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18f0b867-131c-4f6e-a492-f168e974e117-secret-volume\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.285795 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/18f0b867-131c-4f6e-a492-f168e974e117-config-volume\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.387448 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m9kw\" (UniqueName: \"kubernetes.io/projected/18f0b867-131c-4f6e-a492-f168e974e117-kube-api-access-6m9kw\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.387515 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18f0b867-131c-4f6e-a492-f168e974e117-secret-volume\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.387563 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/18f0b867-131c-4f6e-a492-f168e974e117-config-volume\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.388665 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/18f0b867-131c-4f6e-a492-f168e974e117-config-volume\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.397694 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18f0b867-131c-4f6e-a492-f168e974e117-secret-volume\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.407294 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m9kw\" (UniqueName: \"kubernetes.io/projected/18f0b867-131c-4f6e-a492-f168e974e117-kube-api-access-6m9kw\") pod \"collect-profiles-29493600-c9znb\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.497590 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.505927 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:00 crc kubenswrapper[4959]: I0128 16:00:00.976441 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb"] Jan 28 16:00:01 crc kubenswrapper[4959]: I0128 16:00:01.937810 4959 generic.go:334] "Generic (PLEG): container finished" podID="18f0b867-131c-4f6e-a492-f168e974e117" containerID="b419d9082142b6b243c0bb360d0f0a77a60b57d58bd6575e3116c986d23e104f" exitCode=0 Jan 28 16:00:01 crc kubenswrapper[4959]: I0128 16:00:01.937915 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" event={"ID":"18f0b867-131c-4f6e-a492-f168e974e117","Type":"ContainerDied","Data":"b419d9082142b6b243c0bb360d0f0a77a60b57d58bd6575e3116c986d23e104f"} Jan 28 16:00:01 crc kubenswrapper[4959]: I0128 16:00:01.938172 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" event={"ID":"18f0b867-131c-4f6e-a492-f168e974e117","Type":"ContainerStarted","Data":"1c173a97f52be69c6cca73b58fec1c34fa27c1f68e096ca8eac6e5ac620c9aa6"} Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.248429 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.339056 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18f0b867-131c-4f6e-a492-f168e974e117-secret-volume\") pod \"18f0b867-131c-4f6e-a492-f168e974e117\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.339246 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/18f0b867-131c-4f6e-a492-f168e974e117-config-volume\") pod \"18f0b867-131c-4f6e-a492-f168e974e117\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.339286 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6m9kw\" (UniqueName: \"kubernetes.io/projected/18f0b867-131c-4f6e-a492-f168e974e117-kube-api-access-6m9kw\") pod \"18f0b867-131c-4f6e-a492-f168e974e117\" (UID: \"18f0b867-131c-4f6e-a492-f168e974e117\") " Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.339978 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18f0b867-131c-4f6e-a492-f168e974e117-config-volume" (OuterVolumeSpecName: "config-volume") pod "18f0b867-131c-4f6e-a492-f168e974e117" (UID: "18f0b867-131c-4f6e-a492-f168e974e117"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.345186 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18f0b867-131c-4f6e-a492-f168e974e117-kube-api-access-6m9kw" (OuterVolumeSpecName: "kube-api-access-6m9kw") pod "18f0b867-131c-4f6e-a492-f168e974e117" (UID: "18f0b867-131c-4f6e-a492-f168e974e117"). InnerVolumeSpecName "kube-api-access-6m9kw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.345826 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f0b867-131c-4f6e-a492-f168e974e117-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "18f0b867-131c-4f6e-a492-f168e974e117" (UID: "18f0b867-131c-4f6e-a492-f168e974e117"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.441247 4959 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18f0b867-131c-4f6e-a492-f168e974e117-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.441289 4959 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/18f0b867-131c-4f6e-a492-f168e974e117-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.441301 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6m9kw\" (UniqueName: \"kubernetes.io/projected/18f0b867-131c-4f6e-a492-f168e974e117-kube-api-access-6m9kw\") on node \"crc\" DevicePath \"\"" Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.961316 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" event={"ID":"18f0b867-131c-4f6e-a492-f168e974e117","Type":"ContainerDied","Data":"1c173a97f52be69c6cca73b58fec1c34fa27c1f68e096ca8eac6e5ac620c9aa6"} Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.961376 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c173a97f52be69c6cca73b58fec1c34fa27c1f68e096ca8eac6e5ac620c9aa6" Jan 28 16:00:03 crc kubenswrapper[4959]: I0128 16:00:03.961409 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493600-c9znb" Jan 28 16:00:04 crc kubenswrapper[4959]: I0128 16:00:04.325252 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8"] Jan 28 16:00:04 crc kubenswrapper[4959]: I0128 16:00:04.331872 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493555-dn2f8"] Jan 28 16:00:04 crc kubenswrapper[4959]: I0128 16:00:04.587309 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 16:00:04 crc kubenswrapper[4959]: E0128 16:00:04.587692 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:00:04 crc kubenswrapper[4959]: I0128 16:00:04.599276 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a0e736f-1d0f-4c59-ab45-b057dda052aa" path="/var/lib/kubelet/pods/3a0e736f-1d0f-4c59-ab45-b057dda052aa/volumes" Jan 28 16:00:16 crc kubenswrapper[4959]: I0128 16:00:16.587318 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 16:00:16 crc kubenswrapper[4959]: E0128 16:00:16.588207 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:00:17 crc kubenswrapper[4959]: I0128 16:00:17.566743 4959 scope.go:117] "RemoveContainer" containerID="751135847f1126e12a4a274ff297bcc95a44c2ee3644d4f94d92347be0a41934" Jan 28 16:00:28 crc kubenswrapper[4959]: I0128 16:00:28.587488 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 16:00:28 crc kubenswrapper[4959]: E0128 16:00:28.588339 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:00:40 crc kubenswrapper[4959]: I0128 16:00:40.592338 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 16:00:41 crc kubenswrapper[4959]: I0128 16:00:41.232735 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"cbaf9042edf78b9c82233ee906797f2590871231f458fd5feec3759b4db16887"} Jan 28 16:02:58 crc kubenswrapper[4959]: I0128 16:02:58.689654 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:02:58 crc kubenswrapper[4959]: I0128 16:02:58.690748 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:03:28 crc kubenswrapper[4959]: I0128 16:03:28.690041 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:03:28 crc kubenswrapper[4959]: I0128 16:03:28.690894 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:03:58 crc kubenswrapper[4959]: I0128 16:03:58.689115 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:03:58 crc kubenswrapper[4959]: I0128 16:03:58.689652 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" 
podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:03:58 crc kubenswrapper[4959]: I0128 16:03:58.689695 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 16:03:58 crc kubenswrapper[4959]: I0128 16:03:58.690331 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cbaf9042edf78b9c82233ee906797f2590871231f458fd5feec3759b4db16887"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:03:58 crc kubenswrapper[4959]: I0128 16:03:58.690384 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://cbaf9042edf78b9c82233ee906797f2590871231f458fd5feec3759b4db16887" gracePeriod=600 Jan 28 16:03:59 crc kubenswrapper[4959]: I0128 16:03:59.815501 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="cbaf9042edf78b9c82233ee906797f2590871231f458fd5feec3759b4db16887" exitCode=0 Jan 28 16:03:59 crc kubenswrapper[4959]: I0128 16:03:59.816028 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"cbaf9042edf78b9c82233ee906797f2590871231f458fd5feec3759b4db16887"} Jan 28 16:03:59 crc kubenswrapper[4959]: I0128 16:03:59.826200 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82"} Jan 28 16:03:59 crc kubenswrapper[4959]: I0128 16:03:59.826256 4959 scope.go:117] "RemoveContainer" containerID="572f786139ed34fd2090192b3c15b77ebe676e6f03744d2b2284f0f0d4a96969" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.099411 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-v4n5h"] Jan 28 16:04:01 crc kubenswrapper[4959]: E0128 16:04:01.100226 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18f0b867-131c-4f6e-a492-f168e974e117" containerName="collect-profiles" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.100242 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="18f0b867-131c-4f6e-a492-f168e974e117" containerName="collect-profiles" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.100428 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="18f0b867-131c-4f6e-a492-f168e974e117" containerName="collect-profiles" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.101910 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.114447 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v4n5h"] Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.246462 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-utilities\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.246888 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-catalog-content\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.247035 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86cfx\" (UniqueName: \"kubernetes.io/projected/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-kube-api-access-86cfx\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.349799 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-utilities\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.350418 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-catalog-content\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.350574 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86cfx\" (UniqueName: \"kubernetes.io/projected/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-kube-api-access-86cfx\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.350482 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-utilities\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.350755 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-catalog-content\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.376526 4959 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-86cfx\" (UniqueName: \"kubernetes.io/projected/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-kube-api-access-86cfx\") pod \"certified-operators-v4n5h\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.423042 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:01 crc kubenswrapper[4959]: I0128 16:04:01.908686 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v4n5h"] Jan 28 16:04:02 crc kubenswrapper[4959]: I0128 16:04:02.853410 4959 generic.go:334] "Generic (PLEG): container finished" podID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerID="62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b" exitCode=0 Jan 28 16:04:02 crc kubenswrapper[4959]: I0128 16:04:02.853460 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v4n5h" event={"ID":"89a6a6f3-735d-4e15-adcc-d06b808c9a2a","Type":"ContainerDied","Data":"62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b"} Jan 28 16:04:02 crc kubenswrapper[4959]: I0128 16:04:02.853749 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v4n5h" event={"ID":"89a6a6f3-735d-4e15-adcc-d06b808c9a2a","Type":"ContainerStarted","Data":"769ac86975803626b3a7d374f684c1f3513c9781137611f2180ed3eea44cba9f"} Jan 28 16:04:02 crc kubenswrapper[4959]: I0128 16:04:02.855820 4959 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 16:04:03 crc kubenswrapper[4959]: I0128 16:04:03.874351 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v4n5h" event={"ID":"89a6a6f3-735d-4e15-adcc-d06b808c9a2a","Type":"ContainerStarted","Data":"58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14"} Jan 28 16:04:04 crc kubenswrapper[4959]: I0128 16:04:04.883443 4959 generic.go:334] "Generic (PLEG): container finished" podID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerID="58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14" exitCode=0 Jan 28 16:04:04 crc kubenswrapper[4959]: I0128 16:04:04.883560 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v4n5h" event={"ID":"89a6a6f3-735d-4e15-adcc-d06b808c9a2a","Type":"ContainerDied","Data":"58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14"} Jan 28 16:04:05 crc kubenswrapper[4959]: I0128 16:04:05.893635 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v4n5h" event={"ID":"89a6a6f3-735d-4e15-adcc-d06b808c9a2a","Type":"ContainerStarted","Data":"cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee"} Jan 28 16:04:05 crc kubenswrapper[4959]: I0128 16:04:05.921233 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-v4n5h" podStartSLOduration=2.395637335 podStartE2EDuration="4.921210899s" podCreationTimestamp="2026-01-28 16:04:01 +0000 UTC" firstStartedPulling="2026-01-28 16:04:02.855525552 +0000 UTC m=+2826.301431935" lastFinishedPulling="2026-01-28 16:04:05.381099116 +0000 UTC m=+2828.827005499" observedRunningTime="2026-01-28 16:04:05.910340913 +0000 UTC m=+2829.356247326" watchObservedRunningTime="2026-01-28 
16:04:05.921210899 +0000 UTC m=+2829.367117272" Jan 28 16:04:11 crc kubenswrapper[4959]: I0128 16:04:11.423683 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:11 crc kubenswrapper[4959]: I0128 16:04:11.424370 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:11 crc kubenswrapper[4959]: I0128 16:04:11.466303 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:11 crc kubenswrapper[4959]: I0128 16:04:11.979646 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:12 crc kubenswrapper[4959]: I0128 16:04:12.029800 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v4n5h"] Jan 28 16:04:13 crc kubenswrapper[4959]: I0128 16:04:13.952344 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-v4n5h" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerName="registry-server" containerID="cri-o://cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee" gracePeriod=2 Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.389323 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.489032 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-utilities\") pod \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.489068 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-catalog-content\") pod \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.489148 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86cfx\" (UniqueName: \"kubernetes.io/projected/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-kube-api-access-86cfx\") pod \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\" (UID: \"89a6a6f3-735d-4e15-adcc-d06b808c9a2a\") " Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.490527 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-utilities" (OuterVolumeSpecName: "utilities") pod "89a6a6f3-735d-4e15-adcc-d06b808c9a2a" (UID: "89a6a6f3-735d-4e15-adcc-d06b808c9a2a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.495171 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-kube-api-access-86cfx" (OuterVolumeSpecName: "kube-api-access-86cfx") pod "89a6a6f3-735d-4e15-adcc-d06b808c9a2a" (UID: "89a6a6f3-735d-4e15-adcc-d06b808c9a2a"). InnerVolumeSpecName "kube-api-access-86cfx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.538448 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89a6a6f3-735d-4e15-adcc-d06b808c9a2a" (UID: "89a6a6f3-735d-4e15-adcc-d06b808c9a2a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.590517 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.590548 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.590561 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86cfx\" (UniqueName: \"kubernetes.io/projected/89a6a6f3-735d-4e15-adcc-d06b808c9a2a-kube-api-access-86cfx\") on node \"crc\" DevicePath \"\"" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.961088 4959 generic.go:334] "Generic (PLEG): container finished" podID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerID="cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee" exitCode=0 Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.961177 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v4n5h" event={"ID":"89a6a6f3-735d-4e15-adcc-d06b808c9a2a","Type":"ContainerDied","Data":"cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee"} Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.961425 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v4n5h" event={"ID":"89a6a6f3-735d-4e15-adcc-d06b808c9a2a","Type":"ContainerDied","Data":"769ac86975803626b3a7d374f684c1f3513c9781137611f2180ed3eea44cba9f"} Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.961448 4959 scope.go:117] "RemoveContainer" containerID="cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.961215 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v4n5h" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.983644 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v4n5h"] Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.988125 4959 scope.go:117] "RemoveContainer" containerID="58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14" Jan 28 16:04:14 crc kubenswrapper[4959]: I0128 16:04:14.989879 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-v4n5h"] Jan 28 16:04:15 crc kubenswrapper[4959]: I0128 16:04:15.005931 4959 scope.go:117] "RemoveContainer" containerID="62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b" Jan 28 16:04:15 crc kubenswrapper[4959]: I0128 16:04:15.039876 4959 scope.go:117] "RemoveContainer" containerID="cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee" Jan 28 16:04:15 crc kubenswrapper[4959]: E0128 16:04:15.040736 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee\": container with ID starting with cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee not found: ID does not exist" containerID="cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee" Jan 28 16:04:15 crc kubenswrapper[4959]: I0128 16:04:15.040785 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee"} err="failed to get container status \"cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee\": rpc error: code = NotFound desc = could not find container \"cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee\": container with ID starting with cf0ffb18c1b69779159aa405b64a3857739953afdd75a9519825534f581b61ee not found: ID does not exist" Jan 28 16:04:15 crc kubenswrapper[4959]: I0128 16:04:15.040817 4959 scope.go:117] "RemoveContainer" containerID="58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14" Jan 28 16:04:15 crc kubenswrapper[4959]: E0128 16:04:15.041157 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14\": container with ID starting with 58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14 not found: ID does not exist" containerID="58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14" Jan 28 16:04:15 crc kubenswrapper[4959]: I0128 16:04:15.041189 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14"} err="failed to get container status \"58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14\": rpc error: code = NotFound desc = could not find container \"58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14\": container with ID starting with 58cb15db358c92b7d7f9736a68448675ea13bfe59a7760e4de3f0623bc286d14 not found: ID does not exist" Jan 28 16:04:15 crc kubenswrapper[4959]: I0128 16:04:15.041211 4959 scope.go:117] "RemoveContainer" containerID="62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b" Jan 28 16:04:15 crc kubenswrapper[4959]: E0128 16:04:15.041668 4959 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b\": container with ID starting with 62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b not found: ID does not exist" containerID="62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b" Jan 28 16:04:15 crc kubenswrapper[4959]: I0128 16:04:15.041701 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b"} err="failed to get container status \"62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b\": rpc error: code = NotFound desc = could not find container \"62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b\": container with ID starting with 62026ccfcdbcb6139aaa16a55e7e538b6778e9c6bfc765713bfd14d8ac17b85b not found: ID does not exist" Jan 28 16:04:16 crc kubenswrapper[4959]: I0128 16:04:16.604677 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" path="/var/lib/kubelet/pods/89a6a6f3-735d-4e15-adcc-d06b808c9a2a/volumes" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.599458 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7b5sk"] Jan 28 16:05:35 crc kubenswrapper[4959]: E0128 16:05:35.600504 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerName="extract-content" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.600523 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerName="extract-content" Jan 28 16:05:35 crc kubenswrapper[4959]: E0128 16:05:35.600532 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerName="extract-utilities" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.600539 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerName="extract-utilities" Jan 28 16:05:35 crc kubenswrapper[4959]: E0128 16:05:35.600577 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerName="registry-server" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.600584 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerName="registry-server" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.600776 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="89a6a6f3-735d-4e15-adcc-d06b808c9a2a" containerName="registry-server" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.601932 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.618072 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7b5sk"] Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.697796 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bpkr\" (UniqueName: \"kubernetes.io/projected/376b1a11-5f2a-41d6-b2ff-690774b2ff28-kube-api-access-6bpkr\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.698274 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-utilities\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.698458 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-catalog-content\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.799764 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-utilities\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.799859 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-catalog-content\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.799909 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bpkr\" (UniqueName: \"kubernetes.io/projected/376b1a11-5f2a-41d6-b2ff-690774b2ff28-kube-api-access-6bpkr\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.800412 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-utilities\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.800540 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-catalog-content\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.827591 4959 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6bpkr\" (UniqueName: \"kubernetes.io/projected/376b1a11-5f2a-41d6-b2ff-690774b2ff28-kube-api-access-6bpkr\") pod \"redhat-marketplace-7b5sk\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:35 crc kubenswrapper[4959]: I0128 16:05:35.921636 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:36 crc kubenswrapper[4959]: I0128 16:05:36.387653 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7b5sk"] Jan 28 16:05:36 crc kubenswrapper[4959]: I0128 16:05:36.558948 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7b5sk" event={"ID":"376b1a11-5f2a-41d6-b2ff-690774b2ff28","Type":"ContainerStarted","Data":"3d444ee214920de9dfb7fd40b464d50da1e8af811f38aa86e7db697d423ee556"} Jan 28 16:05:37 crc kubenswrapper[4959]: I0128 16:05:37.573929 4959 generic.go:334] "Generic (PLEG): container finished" podID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerID="7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422" exitCode=0 Jan 28 16:05:37 crc kubenswrapper[4959]: I0128 16:05:37.573997 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7b5sk" event={"ID":"376b1a11-5f2a-41d6-b2ff-690774b2ff28","Type":"ContainerDied","Data":"7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422"} Jan 28 16:05:39 crc kubenswrapper[4959]: I0128 16:05:39.588979 4959 generic.go:334] "Generic (PLEG): container finished" podID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerID="820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6" exitCode=0 Jan 28 16:05:39 crc kubenswrapper[4959]: I0128 16:05:39.589505 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7b5sk" event={"ID":"376b1a11-5f2a-41d6-b2ff-690774b2ff28","Type":"ContainerDied","Data":"820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6"} Jan 28 16:05:40 crc kubenswrapper[4959]: I0128 16:05:40.598200 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7b5sk" event={"ID":"376b1a11-5f2a-41d6-b2ff-690774b2ff28","Type":"ContainerStarted","Data":"7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44"} Jan 28 16:05:40 crc kubenswrapper[4959]: I0128 16:05:40.622057 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7b5sk" podStartSLOduration=3.17413017 podStartE2EDuration="5.622036215s" podCreationTimestamp="2026-01-28 16:05:35 +0000 UTC" firstStartedPulling="2026-01-28 16:05:37.575786635 +0000 UTC m=+2921.021693018" lastFinishedPulling="2026-01-28 16:05:40.02369268 +0000 UTC m=+2923.469599063" observedRunningTime="2026-01-28 16:05:40.615265999 +0000 UTC m=+2924.061172382" watchObservedRunningTime="2026-01-28 16:05:40.622036215 +0000 UTC m=+2924.067942598" Jan 28 16:05:45 crc kubenswrapper[4959]: I0128 16:05:45.922478 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:45 crc kubenswrapper[4959]: I0128 16:05:45.923651 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:45 crc kubenswrapper[4959]: I0128 16:05:45.965286 4959 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:46 crc kubenswrapper[4959]: I0128 16:05:46.690783 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:46 crc kubenswrapper[4959]: I0128 16:05:46.737709 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7b5sk"] Jan 28 16:05:48 crc kubenswrapper[4959]: I0128 16:05:48.657947 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7b5sk" podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerName="registry-server" containerID="cri-o://7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44" gracePeriod=2 Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.049866 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.212984 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-utilities\") pod \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.213180 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-catalog-content\") pod \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.213238 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bpkr\" (UniqueName: \"kubernetes.io/projected/376b1a11-5f2a-41d6-b2ff-690774b2ff28-kube-api-access-6bpkr\") pod \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\" (UID: \"376b1a11-5f2a-41d6-b2ff-690774b2ff28\") " Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.214173 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-utilities" (OuterVolumeSpecName: "utilities") pod "376b1a11-5f2a-41d6-b2ff-690774b2ff28" (UID: "376b1a11-5f2a-41d6-b2ff-690774b2ff28"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.221237 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/376b1a11-5f2a-41d6-b2ff-690774b2ff28-kube-api-access-6bpkr" (OuterVolumeSpecName: "kube-api-access-6bpkr") pod "376b1a11-5f2a-41d6-b2ff-690774b2ff28" (UID: "376b1a11-5f2a-41d6-b2ff-690774b2ff28"). InnerVolumeSpecName "kube-api-access-6bpkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.240089 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "376b1a11-5f2a-41d6-b2ff-690774b2ff28" (UID: "376b1a11-5f2a-41d6-b2ff-690774b2ff28"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.316064 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.316118 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bpkr\" (UniqueName: \"kubernetes.io/projected/376b1a11-5f2a-41d6-b2ff-690774b2ff28-kube-api-access-6bpkr\") on node \"crc\" DevicePath \"\"" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.316129 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/376b1a11-5f2a-41d6-b2ff-690774b2ff28-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.667086 4959 generic.go:334] "Generic (PLEG): container finished" podID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerID="7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44" exitCode=0 Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.667140 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7b5sk" event={"ID":"376b1a11-5f2a-41d6-b2ff-690774b2ff28","Type":"ContainerDied","Data":"7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44"} Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.667227 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7b5sk" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.667292 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7b5sk" event={"ID":"376b1a11-5f2a-41d6-b2ff-690774b2ff28","Type":"ContainerDied","Data":"3d444ee214920de9dfb7fd40b464d50da1e8af811f38aa86e7db697d423ee556"} Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.667347 4959 scope.go:117] "RemoveContainer" containerID="7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.690156 4959 scope.go:117] "RemoveContainer" containerID="820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.700719 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7b5sk"] Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.721162 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7b5sk"] Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.724874 4959 scope.go:117] "RemoveContainer" containerID="7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.758711 4959 scope.go:117] "RemoveContainer" containerID="7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44" Jan 28 16:05:49 crc kubenswrapper[4959]: E0128 16:05:49.759332 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44\": container with ID starting with 7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44 not found: ID does not exist" containerID="7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.759398 4959 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44"} err="failed to get container status \"7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44\": rpc error: code = NotFound desc = could not find container \"7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44\": container with ID starting with 7d37daeef41b3e9405c651bd61b7dec073a5f62c203a6fefe4ac3b41da83bb44 not found: ID does not exist" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.759440 4959 scope.go:117] "RemoveContainer" containerID="820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6" Jan 28 16:05:49 crc kubenswrapper[4959]: E0128 16:05:49.759831 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6\": container with ID starting with 820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6 not found: ID does not exist" containerID="820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.759857 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6"} err="failed to get container status \"820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6\": rpc error: code = NotFound desc = could not find container \"820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6\": container with ID starting with 820a6215dddedbab7de81e59057b578a29dfee77f9a63218750a919915f607b6 not found: ID does not exist" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.759871 4959 scope.go:117] "RemoveContainer" containerID="7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422" Jan 28 16:05:49 crc kubenswrapper[4959]: E0128 16:05:49.760346 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422\": container with ID starting with 7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422 not found: ID does not exist" containerID="7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422" Jan 28 16:05:49 crc kubenswrapper[4959]: I0128 16:05:49.760386 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422"} err="failed to get container status \"7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422\": rpc error: code = NotFound desc = could not find container \"7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422\": container with ID starting with 7900acd4cabd3de4352cfe054eea091e9675b9fdf5a3f0c6799e0cafad8d5422 not found: ID does not exist" Jan 28 16:05:50 crc kubenswrapper[4959]: I0128 16:05:50.596024 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" path="/var/lib/kubelet/pods/376b1a11-5f2a-41d6-b2ff-690774b2ff28/volumes" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.850773 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-v99kd/must-gather-m8bqf"] Jan 28 16:05:54 crc kubenswrapper[4959]: E0128 16:05:54.851519 4959 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerName="extract-utilities" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.851538 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerName="extract-utilities" Jan 28 16:05:54 crc kubenswrapper[4959]: E0128 16:05:54.851553 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerName="extract-content" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.851562 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerName="extract-content" Jan 28 16:05:54 crc kubenswrapper[4959]: E0128 16:05:54.851593 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerName="registry-server" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.851602 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerName="registry-server" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.851791 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="376b1a11-5f2a-41d6-b2ff-690774b2ff28" containerName="registry-server" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.852849 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.858649 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-v99kd"/"kube-root-ca.crt" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.858735 4959 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-v99kd"/"default-dockercfg-czzjl" Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.866805 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-v99kd/must-gather-m8bqf"] Jan 28 16:05:54 crc kubenswrapper[4959]: I0128 16:05:54.866980 4959 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-v99kd"/"openshift-service-ca.crt" Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.011927 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/992138c6-26a8-4e87-91e9-813c936ef34d-must-gather-output\") pod \"must-gather-m8bqf\" (UID: \"992138c6-26a8-4e87-91e9-813c936ef34d\") " pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.011978 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llm7v\" (UniqueName: \"kubernetes.io/projected/992138c6-26a8-4e87-91e9-813c936ef34d-kube-api-access-llm7v\") pod \"must-gather-m8bqf\" (UID: \"992138c6-26a8-4e87-91e9-813c936ef34d\") " pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.113244 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/992138c6-26a8-4e87-91e9-813c936ef34d-must-gather-output\") pod \"must-gather-m8bqf\" (UID: \"992138c6-26a8-4e87-91e9-813c936ef34d\") " pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.113297 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-llm7v\" (UniqueName: \"kubernetes.io/projected/992138c6-26a8-4e87-91e9-813c936ef34d-kube-api-access-llm7v\") pod \"must-gather-m8bqf\" (UID: \"992138c6-26a8-4e87-91e9-813c936ef34d\") " pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.114149 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/992138c6-26a8-4e87-91e9-813c936ef34d-must-gather-output\") pod \"must-gather-m8bqf\" (UID: \"992138c6-26a8-4e87-91e9-813c936ef34d\") " pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.132395 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llm7v\" (UniqueName: \"kubernetes.io/projected/992138c6-26a8-4e87-91e9-813c936ef34d-kube-api-access-llm7v\") pod \"must-gather-m8bqf\" (UID: \"992138c6-26a8-4e87-91e9-813c936ef34d\") " pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.177042 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.595542 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-v99kd/must-gather-m8bqf"] Jan 28 16:05:55 crc kubenswrapper[4959]: I0128 16:05:55.710668 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-v99kd/must-gather-m8bqf" event={"ID":"992138c6-26a8-4e87-91e9-813c936ef34d","Type":"ContainerStarted","Data":"ace7701864f897ab926186569c178b9472475ddd0b37f449f0535584521ee0b1"} Jan 28 16:05:58 crc kubenswrapper[4959]: I0128 16:05:58.689476 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:05:58 crc kubenswrapper[4959]: I0128 16:05:58.690077 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.709178 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tvkpj"] Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.711429 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.714517 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvkpj"] Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.795624 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-catalog-content\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.795816 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-utilities\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.795909 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zctnj\" (UniqueName: \"kubernetes.io/projected/aa87f946-ebff-4bde-b6e1-cb66757459c2-kube-api-access-zctnj\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.904314 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-utilities\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.904451 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zctnj\" (UniqueName: \"kubernetes.io/projected/aa87f946-ebff-4bde-b6e1-cb66757459c2-kube-api-access-zctnj\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.904724 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-catalog-content\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.905314 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-utilities\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.905374 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-catalog-content\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:05:59 crc kubenswrapper[4959]: I0128 16:05:59.928549 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-zctnj\" (UniqueName: \"kubernetes.io/projected/aa87f946-ebff-4bde-b6e1-cb66757459c2-kube-api-access-zctnj\") pod \"redhat-operators-tvkpj\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:06:00 crc kubenswrapper[4959]: I0128 16:06:00.031659 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.001801 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvkpj"] Jan 28 16:06:03 crc kubenswrapper[4959]: W0128 16:06:03.018749 4959 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa87f946_ebff_4bde_b6e1_cb66757459c2.slice/crio-acf00ce2b552b5a4a95392fe8d9b3a7bedb9a5480d1fe05735cc89b5e370e22b WatchSource:0}: Error finding container acf00ce2b552b5a4a95392fe8d9b3a7bedb9a5480d1fe05735cc89b5e370e22b: Status 404 returned error can't find the container with id acf00ce2b552b5a4a95392fe8d9b3a7bedb9a5480d1fe05735cc89b5e370e22b Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.538489 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-v99kd/crc-debug-c7f2h"] Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.540034 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.681736 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9cbr\" (UniqueName: \"kubernetes.io/projected/b19230c5-157a-4927-8dc8-e90f202397ab-kube-api-access-q9cbr\") pod \"crc-debug-c7f2h\" (UID: \"b19230c5-157a-4927-8dc8-e90f202397ab\") " pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.682224 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b19230c5-157a-4927-8dc8-e90f202397ab-host\") pod \"crc-debug-c7f2h\" (UID: \"b19230c5-157a-4927-8dc8-e90f202397ab\") " pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.784865 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b19230c5-157a-4927-8dc8-e90f202397ab-host\") pod \"crc-debug-c7f2h\" (UID: \"b19230c5-157a-4927-8dc8-e90f202397ab\") " pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.784995 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9cbr\" (UniqueName: \"kubernetes.io/projected/b19230c5-157a-4927-8dc8-e90f202397ab-kube-api-access-q9cbr\") pod \"crc-debug-c7f2h\" (UID: \"b19230c5-157a-4927-8dc8-e90f202397ab\") " pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.785264 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b19230c5-157a-4927-8dc8-e90f202397ab-host\") pod \"crc-debug-c7f2h\" (UID: \"b19230c5-157a-4927-8dc8-e90f202397ab\") " pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.788240 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-must-gather-v99kd/must-gather-m8bqf" event={"ID":"992138c6-26a8-4e87-91e9-813c936ef34d","Type":"ContainerStarted","Data":"9a65cbf4c681e1eb6ba69fe9de07c585bd28c840cb65463a3eed46a96f2c5c3d"} Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.788291 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-v99kd/must-gather-m8bqf" event={"ID":"992138c6-26a8-4e87-91e9-813c936ef34d","Type":"ContainerStarted","Data":"e86d8212efa6d6b0c71daa2c3dfb246903eef20dc7ac05364096bcf93d98760d"} Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.791998 4959 generic.go:334] "Generic (PLEG): container finished" podID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerID="e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514" exitCode=0 Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.792211 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkpj" event={"ID":"aa87f946-ebff-4bde-b6e1-cb66757459c2","Type":"ContainerDied","Data":"e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514"} Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.792313 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkpj" event={"ID":"aa87f946-ebff-4bde-b6e1-cb66757459c2","Type":"ContainerStarted","Data":"acf00ce2b552b5a4a95392fe8d9b3a7bedb9a5480d1fe05735cc89b5e370e22b"} Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.811056 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-v99kd/must-gather-m8bqf" podStartSLOduration=2.729870376 podStartE2EDuration="9.811034238s" podCreationTimestamp="2026-01-28 16:05:54 +0000 UTC" firstStartedPulling="2026-01-28 16:05:55.619547837 +0000 UTC m=+2939.065454220" lastFinishedPulling="2026-01-28 16:06:02.700711699 +0000 UTC m=+2946.146618082" observedRunningTime="2026-01-28 16:06:03.806085676 +0000 UTC m=+2947.251992069" watchObservedRunningTime="2026-01-28 16:06:03.811034238 +0000 UTC m=+2947.256940631" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.816638 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9cbr\" (UniqueName: \"kubernetes.io/projected/b19230c5-157a-4927-8dc8-e90f202397ab-kube-api-access-q9cbr\") pod \"crc-debug-c7f2h\" (UID: \"b19230c5-157a-4927-8dc8-e90f202397ab\") " pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:03 crc kubenswrapper[4959]: I0128 16:06:03.854804 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:04 crc kubenswrapper[4959]: I0128 16:06:04.799956 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-v99kd/crc-debug-c7f2h" event={"ID":"b19230c5-157a-4927-8dc8-e90f202397ab","Type":"ContainerStarted","Data":"49500e5a140aa130609da346baae266fdcb1455435d275a59de39da0e39164d7"} Jan 28 16:06:04 crc kubenswrapper[4959]: I0128 16:06:04.803635 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkpj" event={"ID":"aa87f946-ebff-4bde-b6e1-cb66757459c2","Type":"ContainerStarted","Data":"9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748"} Jan 28 16:06:05 crc kubenswrapper[4959]: I0128 16:06:05.818615 4959 generic.go:334] "Generic (PLEG): container finished" podID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerID="9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748" exitCode=0 Jan 28 16:06:05 crc kubenswrapper[4959]: I0128 16:06:05.818986 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkpj" event={"ID":"aa87f946-ebff-4bde-b6e1-cb66757459c2","Type":"ContainerDied","Data":"9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748"} Jan 28 16:06:09 crc kubenswrapper[4959]: I0128 16:06:09.855373 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkpj" event={"ID":"aa87f946-ebff-4bde-b6e1-cb66757459c2","Type":"ContainerStarted","Data":"eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d"} Jan 28 16:06:09 crc kubenswrapper[4959]: I0128 16:06:09.878050 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tvkpj" podStartSLOduration=5.40035035 podStartE2EDuration="10.878026931s" podCreationTimestamp="2026-01-28 16:05:59 +0000 UTC" firstStartedPulling="2026-01-28 16:06:03.793966538 +0000 UTC m=+2947.239872921" lastFinishedPulling="2026-01-28 16:06:09.271643119 +0000 UTC m=+2952.717549502" observedRunningTime="2026-01-28 16:06:09.873998612 +0000 UTC m=+2953.319905005" watchObservedRunningTime="2026-01-28 16:06:09.878026931 +0000 UTC m=+2953.323933334" Jan 28 16:06:10 crc kubenswrapper[4959]: I0128 16:06:10.031878 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:06:10 crc kubenswrapper[4959]: I0128 16:06:10.032018 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:06:11 crc kubenswrapper[4959]: I0128 16:06:11.080637 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" probeResult="failure" output=< Jan 28 16:06:11 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s Jan 28 16:06:11 crc kubenswrapper[4959]: > Jan 28 16:06:17 crc kubenswrapper[4959]: I0128 16:06:17.939227 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-v99kd/crc-debug-c7f2h" event={"ID":"b19230c5-157a-4927-8dc8-e90f202397ab","Type":"ContainerStarted","Data":"e87a09b7e019ec3fc88382b3d5c03944206a9741291dce952b7dd876d0c39c86"} Jan 28 16:06:17 crc kubenswrapper[4959]: I0128 16:06:17.956391 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-v99kd/crc-debug-c7f2h" 
Jan 28 16:06:17 crc kubenswrapper[4959]: I0128 16:06:17.956391 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-v99kd/crc-debug-c7f2h" podStartSLOduration=2.0248308760000002 podStartE2EDuration="14.956371495s" podCreationTimestamp="2026-01-28 16:06:03 +0000 UTC" firstStartedPulling="2026-01-28 16:06:03.88202577 +0000 UTC m=+2947.327932143" lastFinishedPulling="2026-01-28 16:06:16.813566379 +0000 UTC m=+2960.259472762" observedRunningTime="2026-01-28 16:06:17.953960487 +0000 UTC m=+2961.399866870" watchObservedRunningTime="2026-01-28 16:06:17.956371495 +0000 UTC m=+2961.402277878"
Jan 28 16:06:21 crc kubenswrapper[4959]: I0128 16:06:21.089893 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" probeResult="failure" output=<
Jan 28 16:06:21 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s
Jan 28 16:06:21 crc kubenswrapper[4959]: >
Jan 28 16:06:28 crc kubenswrapper[4959]: I0128 16:06:28.689076 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 16:06:28 crc kubenswrapper[4959]: I0128 16:06:28.689701 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 16:06:31 crc kubenswrapper[4959]: I0128 16:06:31.094613 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" probeResult="failure" output=<
Jan 28 16:06:31 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s
Jan 28 16:06:31 crc kubenswrapper[4959]: >
Jan 28 16:06:39 crc kubenswrapper[4959]: I0128 16:06:39.107461 4959 generic.go:334] "Generic (PLEG): container finished" podID="b19230c5-157a-4927-8dc8-e90f202397ab" containerID="e87a09b7e019ec3fc88382b3d5c03944206a9741291dce952b7dd876d0c39c86" exitCode=0
Jan 28 16:06:39 crc kubenswrapper[4959]: I0128 16:06:39.107519 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-v99kd/crc-debug-c7f2h" event={"ID":"b19230c5-157a-4927-8dc8-e90f202397ab","Type":"ContainerDied","Data":"e87a09b7e019ec3fc88382b3d5c03944206a9741291dce952b7dd876d0c39c86"}
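The repeated Startup probe failures for redhat-operators-tvkpj all carry the same 1s-budget output against port 50051; the message shape is what a gRPC health-style check produces while the registry-server is still loading its catalog. A sketch of the equivalent check in Go, assuming the container exposes the standard grpc.health.v1 service (the address and timeout are taken from the probe output; this is not the probe binary itself):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    	healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func probe(addr string) error {
    	// The probe reports failure when the exchange does not complete
    	// "within 1s", so one deadline covers connect and RPC.
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()

    	conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		return err
    	}
    	defer conn.Close()

    	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
    	if err != nil {
    		return err
    	}
    	if resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
    		return fmt.Errorf("service not serving: %v", resp.GetStatus())
    	}
    	return nil
    }

    func main() {
    	fmt.Println(probe("127.0.0.1:50051"))
    }

The pod keeps failing this check until 16:07:30, when the startup probe flips to status="started"; holding off liveness enforcement while a slow catalog loads is exactly what a startup probe is for.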
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.211684 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/crc-debug-c7f2h"
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.245208 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-v99kd/crc-debug-c7f2h"]
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.254916 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-v99kd/crc-debug-c7f2h"]
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.348511 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b19230c5-157a-4927-8dc8-e90f202397ab-host\") pod \"b19230c5-157a-4927-8dc8-e90f202397ab\" (UID: \"b19230c5-157a-4927-8dc8-e90f202397ab\") "
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.348614 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b19230c5-157a-4927-8dc8-e90f202397ab-host" (OuterVolumeSpecName: "host") pod "b19230c5-157a-4927-8dc8-e90f202397ab" (UID: "b19230c5-157a-4927-8dc8-e90f202397ab"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.348996 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9cbr\" (UniqueName: \"kubernetes.io/projected/b19230c5-157a-4927-8dc8-e90f202397ab-kube-api-access-q9cbr\") pod \"b19230c5-157a-4927-8dc8-e90f202397ab\" (UID: \"b19230c5-157a-4927-8dc8-e90f202397ab\") "
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.349627 4959 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b19230c5-157a-4927-8dc8-e90f202397ab-host\") on node \"crc\" DevicePath \"\""
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.355506 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b19230c5-157a-4927-8dc8-e90f202397ab-kube-api-access-q9cbr" (OuterVolumeSpecName: "kube-api-access-q9cbr") pod "b19230c5-157a-4927-8dc8-e90f202397ab" (UID: "b19230c5-157a-4927-8dc8-e90f202397ab"). InnerVolumeSpecName "kube-api-access-q9cbr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.451548 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9cbr\" (UniqueName: \"kubernetes.io/projected/b19230c5-157a-4927-8dc8-e90f202397ab-kube-api-access-q9cbr\") on node \"crc\" DevicePath \"\""
Jan 28 16:06:40 crc kubenswrapper[4959]: I0128 16:06:40.596603 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b19230c5-157a-4927-8dc8-e90f202397ab" path="/var/lib/kubelet/pods/b19230c5-157a-4927-8dc8-e90f202397ab/volumes"
Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.081016 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" probeResult="failure" output=<
Jan 28 16:06:41 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s
Jan 28 16:06:41 crc kubenswrapper[4959]: >
Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.143417 4959 scope.go:117] "RemoveContainer" containerID="e87a09b7e019ec3fc88382b3d5c03944206a9741291dce952b7dd876d0c39c86"
Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.143670 4959 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-must-gather-v99kd/crc-debug-c7f2h" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.452996 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-v99kd/crc-debug-t5kqt"] Jan 28 16:06:41 crc kubenswrapper[4959]: E0128 16:06:41.453411 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b19230c5-157a-4927-8dc8-e90f202397ab" containerName="container-00" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.453427 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="b19230c5-157a-4927-8dc8-e90f202397ab" containerName="container-00" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.453637 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="b19230c5-157a-4927-8dc8-e90f202397ab" containerName="container-00" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.454317 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.569307 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4773737a-28c8-460d-9f6d-58207682755c-host\") pod \"crc-debug-t5kqt\" (UID: \"4773737a-28c8-460d-9f6d-58207682755c\") " pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.569443 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-js49l\" (UniqueName: \"kubernetes.io/projected/4773737a-28c8-460d-9f6d-58207682755c-kube-api-access-js49l\") pod \"crc-debug-t5kqt\" (UID: \"4773737a-28c8-460d-9f6d-58207682755c\") " pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.671065 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-js49l\" (UniqueName: \"kubernetes.io/projected/4773737a-28c8-460d-9f6d-58207682755c-kube-api-access-js49l\") pod \"crc-debug-t5kqt\" (UID: \"4773737a-28c8-460d-9f6d-58207682755c\") " pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.671246 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4773737a-28c8-460d-9f6d-58207682755c-host\") pod \"crc-debug-t5kqt\" (UID: \"4773737a-28c8-460d-9f6d-58207682755c\") " pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.671400 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4773737a-28c8-460d-9f6d-58207682755c-host\") pod \"crc-debug-t5kqt\" (UID: \"4773737a-28c8-460d-9f6d-58207682755c\") " pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.698000 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-js49l\" (UniqueName: \"kubernetes.io/projected/4773737a-28c8-460d-9f6d-58207682755c-kube-api-access-js49l\") pod \"crc-debug-t5kqt\" (UID: \"4773737a-28c8-460d-9f6d-58207682755c\") " pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:41 crc kubenswrapper[4959]: I0128 16:06:41.772943 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:42 crc kubenswrapper[4959]: I0128 16:06:42.156648 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-v99kd/crc-debug-t5kqt" event={"ID":"4773737a-28c8-460d-9f6d-58207682755c","Type":"ContainerStarted","Data":"2ce315b343ba0423556c3b0e63de1a4050598852c6ed213bed737c9da7a111e2"} Jan 28 16:06:42 crc kubenswrapper[4959]: I0128 16:06:42.157036 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-v99kd/crc-debug-t5kqt" event={"ID":"4773737a-28c8-460d-9f6d-58207682755c","Type":"ContainerStarted","Data":"5d4740b8f2221c71bc9a579cbf02c8cbe938f48213f57c2f20d1edf32a99d2bb"} Jan 28 16:06:42 crc kubenswrapper[4959]: I0128 16:06:42.201819 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-v99kd/crc-debug-t5kqt"] Jan 28 16:06:42 crc kubenswrapper[4959]: I0128 16:06:42.208764 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-v99kd/crc-debug-t5kqt"] Jan 28 16:06:43 crc kubenswrapper[4959]: I0128 16:06:43.167009 4959 generic.go:334] "Generic (PLEG): container finished" podID="4773737a-28c8-460d-9f6d-58207682755c" containerID="2ce315b343ba0423556c3b0e63de1a4050598852c6ed213bed737c9da7a111e2" exitCode=1 Jan 28 16:06:43 crc kubenswrapper[4959]: I0128 16:06:43.264529 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:43 crc kubenswrapper[4959]: I0128 16:06:43.402816 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-js49l\" (UniqueName: \"kubernetes.io/projected/4773737a-28c8-460d-9f6d-58207682755c-kube-api-access-js49l\") pod \"4773737a-28c8-460d-9f6d-58207682755c\" (UID: \"4773737a-28c8-460d-9f6d-58207682755c\") " Jan 28 16:06:43 crc kubenswrapper[4959]: I0128 16:06:43.403011 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4773737a-28c8-460d-9f6d-58207682755c-host\") pod \"4773737a-28c8-460d-9f6d-58207682755c\" (UID: \"4773737a-28c8-460d-9f6d-58207682755c\") " Jan 28 16:06:43 crc kubenswrapper[4959]: I0128 16:06:43.403472 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4773737a-28c8-460d-9f6d-58207682755c-host" (OuterVolumeSpecName: "host") pod "4773737a-28c8-460d-9f6d-58207682755c" (UID: "4773737a-28c8-460d-9f6d-58207682755c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 16:06:43 crc kubenswrapper[4959]: I0128 16:06:43.408300 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4773737a-28c8-460d-9f6d-58207682755c-kube-api-access-js49l" (OuterVolumeSpecName: "kube-api-access-js49l") pod "4773737a-28c8-460d-9f6d-58207682755c" (UID: "4773737a-28c8-460d-9f6d-58207682755c"). InnerVolumeSpecName "kube-api-access-js49l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:06:43 crc kubenswrapper[4959]: I0128 16:06:43.505008 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-js49l\" (UniqueName: \"kubernetes.io/projected/4773737a-28c8-460d-9f6d-58207682755c-kube-api-access-js49l\") on node \"crc\" DevicePath \"\"" Jan 28 16:06:43 crc kubenswrapper[4959]: I0128 16:06:43.505339 4959 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4773737a-28c8-460d-9f6d-58207682755c-host\") on node \"crc\" DevicePath \"\"" Jan 28 16:06:44 crc kubenswrapper[4959]: I0128 16:06:44.176814 4959 scope.go:117] "RemoveContainer" containerID="2ce315b343ba0423556c3b0e63de1a4050598852c6ed213bed737c9da7a111e2" Jan 28 16:06:44 crc kubenswrapper[4959]: I0128 16:06:44.176839 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/crc-debug-t5kqt" Jan 28 16:06:44 crc kubenswrapper[4959]: I0128 16:06:44.598904 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4773737a-28c8-460d-9f6d-58207682755c" path="/var/lib/kubelet/pods/4773737a-28c8-460d-9f6d-58207682755c/volumes" Jan 28 16:06:51 crc kubenswrapper[4959]: I0128 16:06:51.071742 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" probeResult="failure" output=< Jan 28 16:06:51 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s Jan 28 16:06:51 crc kubenswrapper[4959]: > Jan 28 16:06:58 crc kubenswrapper[4959]: I0128 16:06:58.689354 4959 patch_prober.go:28] interesting pod/machine-config-daemon-r75mw container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 16:06:58 crc kubenswrapper[4959]: I0128 16:06:58.690053 4959 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 16:06:58 crc kubenswrapper[4959]: I0128 16:06:58.690136 4959 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" Jan 28 16:06:58 crc kubenswrapper[4959]: I0128 16:06:58.690963 4959 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82"} pod="openshift-machine-config-operator/machine-config-daemon-r75mw" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 16:06:58 crc kubenswrapper[4959]: I0128 16:06:58.691037 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" containerName="machine-config-daemon" containerID="cri-o://eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" gracePeriod=600 Jan 28 16:06:58 crc kubenswrapper[4959]: E0128 16:06:58.817383 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:06:59 crc kubenswrapper[4959]: I0128 16:06:59.345370 4959 generic.go:334] "Generic (PLEG): container finished" podID="f22b9702-cd33-405b-9cea-babf675908f5" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" exitCode=0 Jan 28 16:06:59 crc kubenswrapper[4959]: I0128 16:06:59.345467 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerDied","Data":"eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82"} Jan 28 16:06:59 crc kubenswrapper[4959]: I0128 16:06:59.345750 4959 scope.go:117] "RemoveContainer" containerID="cbaf9042edf78b9c82233ee906797f2590871231f458fd5feec3759b4db16887" Jan 28 16:06:59 crc kubenswrapper[4959]: I0128 16:06:59.346395 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:06:59 crc kubenswrapper[4959]: E0128 16:06:59.346782 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:07:00 crc kubenswrapper[4959]: I0128 16:07:00.479198 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7ff5475cc9-zcgr9_c30a71cc-6b61-4950-b2a4-895699bc5d8f/init/0.log" Jan 28 16:07:00 crc kubenswrapper[4959]: I0128 16:07:00.712048 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7ff5475cc9-zcgr9_c30a71cc-6b61-4950-b2a4-895699bc5d8f/dnsmasq-dns/0.log" Jan 28 16:07:00 crc kubenswrapper[4959]: I0128 16:07:00.761655 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7ff5475cc9-zcgr9_c30a71cc-6b61-4950-b2a4-895699bc5d8f/init/0.log" Jan 28 16:07:00 crc kubenswrapper[4959]: I0128 16:07:00.884811 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_4c05f0db-12b1-4491-8680-2df359888603/kube-state-metrics/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.051769 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_e7bad372-1c02-48b3-838d-7d5e97b30b57/memcached/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.115416 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_54187dbe-318c-4070-9771-d3d98fc10457/mysql-bootstrap/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.122996 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" probeResult="failure" output=< Jan 28 16:07:01 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s Jan 28 16:07:01 crc kubenswrapper[4959]: > Jan 28 16:07:01 crc 
kubenswrapper[4959]: I0128 16:07:01.301268 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_54187dbe-318c-4070-9771-d3d98fc10457/mysql-bootstrap/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.364740 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_54187dbe-318c-4070-9771-d3d98fc10457/galera/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.490184 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_d7f92998-4485-45fa-b5c5-cbb5211799c6/mysql-bootstrap/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.703762 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_d7f92998-4485-45fa-b5c5-cbb5211799c6/galera/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.847387 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_d7f92998-4485-45fa-b5c5-cbb5211799c6/mysql-bootstrap/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.853941 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-bp544_93fc210e-4599-4436-b8e6-a20a8c5cd2b4/ovn-controller/0.log" Jan 28 16:07:01 crc kubenswrapper[4959]: I0128 16:07:01.923030 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-24g5r_f9cf2a2e-c773-4fcc-86ea-01ed47c305cb/openstack-network-exporter/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.110973 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zf2dp_8171da25-3ff2-431c-a3a6-482426500111/ovsdb-server-init/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.326786 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zf2dp_8171da25-3ff2-431c-a3a6-482426500111/ovs-vswitchd/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.339977 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zf2dp_8171da25-3ff2-431c-a3a6-482426500111/ovsdb-server-init/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.366691 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-zf2dp_8171da25-3ff2-431c-a3a6-482426500111/ovsdb-server/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.521163 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_48522d8d-37f7-4011-b382-85567a833329/ovn-northd/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.539332 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_48522d8d-37f7-4011-b382-85567a833329/openstack-network-exporter/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.581200 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_72dccd04-5057-4f99-942e-750ca1f7b3b5/openstack-network-exporter/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.760461 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_72dccd04-5057-4f99-942e-750ca1f7b3b5/ovsdbserver-nb/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.812784 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_3e4ce542-8a53-4fea-b644-4ccb423a313b/openstack-network-exporter/0.log" Jan 28 16:07:02 crc kubenswrapper[4959]: I0128 16:07:02.820370 4959 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_3e4ce542-8a53-4fea-b644-4ccb423a313b/ovsdbserver-sb/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.016977 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ee1fc53a-3817-4c94-8bd6-569c089c02cb/setup-container/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.244804 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ee1fc53a-3817-4c94-8bd6-569c089c02cb/setup-container/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.257521 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_a81258f3-e48f-44f0-93d9-02e58302683a/setup-container/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.265649 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ee1fc53a-3817-4c94-8bd6-569c089c02cb/rabbitmq/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.442077 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_a81258f3-e48f-44f0-93d9-02e58302683a/setup-container/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.506520 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_a81258f3-e48f-44f0-93d9-02e58302683a/rabbitmq/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.519800 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-c8wlm_b009e707-15ca-458c-ab37-d3cab102e497/swift-ring-rebalance/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.883086 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/account-auditor/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.914994 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/account-replicator/0.log" Jan 28 16:07:03 crc kubenswrapper[4959]: I0128 16:07:03.937018 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/account-reaper/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.137334 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/account-server/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.146760 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/container-auditor/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.161687 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/container-replicator/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.177634 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/container-server/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.367673 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/object-auditor/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.382983 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/container-updater/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.424015 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/object-expirer/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.424914 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/object-replicator/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.622053 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/object-server/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.647041 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/rsync/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.675247 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/object-updater/0.log" Jan 28 16:07:04 crc kubenswrapper[4959]: I0128 16:07:04.736509 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2c21863c-592f-436a-8fe2-06b0f78b7755/swift-recon-cron/0.log" Jan 28 16:07:11 crc kubenswrapper[4959]: I0128 16:07:11.085986 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" probeResult="failure" output=< Jan 28 16:07:11 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s Jan 28 16:07:11 crc kubenswrapper[4959]: > Jan 28 16:07:12 crc kubenswrapper[4959]: I0128 16:07:12.593831 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:07:12 crc kubenswrapper[4959]: E0128 16:07:12.594042 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:07:21 crc kubenswrapper[4959]: I0128 16:07:21.080016 4959 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" probeResult="failure" output=< Jan 28 16:07:21 crc kubenswrapper[4959]: timeout: failed to connect service ":50051" within 1s Jan 28 16:07:21 crc kubenswrapper[4959]: > Jan 28 16:07:21 crc kubenswrapper[4959]: I0128 16:07:21.858367 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6bc7f4f4cf-kz6n7_174adb74-3ce7-4082-932c-4b8c00059fc7/manager/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.049350 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45_db003231-aaa9-4d1d-9d56-046741bb4b32/util/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.246271 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45_db003231-aaa9-4d1d-9d56-046741bb4b32/util/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.247839 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45_db003231-aaa9-4d1d-9d56-046741bb4b32/pull/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.248198 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45_db003231-aaa9-4d1d-9d56-046741bb4b32/pull/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.430829 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45_db003231-aaa9-4d1d-9d56-046741bb4b32/util/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.463391 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45_db003231-aaa9-4d1d-9d56-046741bb4b32/pull/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.513690 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ced7e1ada3abf6b3b63db7d20c8ec826fce876df6438a829033802f9bd6vs45_db003231-aaa9-4d1d-9d56-046741bb4b32/extract/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.649921 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-f6487bd57-k75pz_9234aa7e-f345-4186-bec5-e54051a95f1f/manager/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.690697 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-66dfbd6f5d-zm9pn_6173ecaa-d123-4b57-8ffd-a1c19295b06f/manager/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.921269 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6db5dbd896-d6t5g_f957e48d-1d55-4d55-8059-693078a8db5f/manager/0.log" Jan 28 16:07:22 crc kubenswrapper[4959]: I0128 16:07:22.938754 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-587c6bfdcf-pd5tw_47665907-f1ce-43a1-a3b0-1510bd987a4f/manager/0.log" Jan 28 16:07:23 crc kubenswrapper[4959]: I0128 16:07:23.182147 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5fb775575f-rcpd5_666e5c47-61d6-4fdf-bbaf-7aae03a06912/manager/0.log" Jan 28 16:07:23 crc kubenswrapper[4959]: I0128 16:07:23.281229 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79955696d6-nzlrd_d3950d98-83ab-4ad0-b91c-cb838ae61278/manager/0.log" Jan 28 16:07:23 crc kubenswrapper[4959]: I0128 16:07:23.393417 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-958664b5-vcpch_86f9260b-f92a-4dc1-9445-8bb3de058537/manager/0.log" Jan 28 16:07:23 crc kubenswrapper[4959]: I0128 16:07:23.587648 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b84b46695-wfxq5_08b2bcb6-f6da-4a40-95c6-c225a4c145fd/manager/0.log" Jan 28 16:07:23 crc kubenswrapper[4959]: I0128 16:07:23.612682 4959 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-765668569f-qcwbm_d9f17334-8180-44ba-ab9d-28c3a24c56ee/manager/0.log" Jan 28 16:07:23 crc kubenswrapper[4959]: I0128 16:07:23.822949 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-67bf948998-zrxc4_edfcc729-f49c-4831-898c-448d88ae3236/manager/0.log" Jan 28 16:07:23 crc kubenswrapper[4959]: I0128 16:07:23.844551 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-694c5bfc85-njl9c_868c4023-b09a-433a-8a12-ff02629c4ff2/manager/0.log" Jan 28 16:07:24 crc kubenswrapper[4959]: I0128 16:07:24.239715 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-5c765b4558-mls2r_9d84ca34-1566-41d9-a8aa-083d9c405581/manager/0.log" Jan 28 16:07:24 crc kubenswrapper[4959]: I0128 16:07:24.295121 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-ddcbfd695-vffmq_ef13bfff-4e74-42e2-96b7-c9e1dff84e92/manager/0.log" Jan 28 16:07:24 crc kubenswrapper[4959]: I0128 16:07:24.407000 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-59c4b45c4dr8fp8_4a538287-5d5b-4ccf-8ed1-570fdc5e5d0a/manager/0.log" Jan 28 16:07:24 crc kubenswrapper[4959]: I0128 16:07:24.573455 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-678d9cfb88-q22tx_6d73f93c-c6ca-4f91-828e-ed160e528063/operator/0.log" Jan 28 16:07:24 crc kubenswrapper[4959]: I0128 16:07:24.936351 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-6t6ww_9131b387-d052-4f47-9c63-251dafdaf37f/registry-server/0.log" Jan 28 16:07:25 crc kubenswrapper[4959]: I0128 16:07:25.045873 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-57d89bf95c-wkcdv_ab83fff6-1016-4052-adf6-13c1ac8b832c/manager/0.log" Jan 28 16:07:25 crc kubenswrapper[4959]: I0128 16:07:25.155354 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-788c46999f-zpcjn_f5ea12bf-b2a9-4a8a-8ac8-967b92e0081d/manager/0.log" Jan 28 16:07:25 crc kubenswrapper[4959]: I0128 16:07:25.234862 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b964cf4cd-tkl2z_3fc1a295-6fb3-43e0-9379-eeb8b63a82a8/manager/0.log" Jan 28 16:07:25 crc kubenswrapper[4959]: I0128 16:07:25.356837 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-mcn29_d5c6598b-38d8-4598-ac7e-862426f8c0d6/operator/0.log" Jan 28 16:07:25 crc kubenswrapper[4959]: I0128 16:07:25.493521 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-68fc8c869-z872n_b0a64600-f6b7-4c15-9b34-3b3c3bdbdf10/manager/0.log" Jan 28 16:07:25 crc kubenswrapper[4959]: I0128 16:07:25.614919 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d69b9c5db-xkcll_f2627922-faf4-4e19-8084-0a10b175546b/manager/0.log" Jan 28 16:07:25 crc kubenswrapper[4959]: I0128 16:07:25.729347 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_test-operator-controller-manager-56f8bfcd9f-td678_7607b89f-4c59-4c4c-9b63-3b25f4d653cf/manager/0.log" Jan 28 16:07:25 crc kubenswrapper[4959]: I0128 16:07:25.866420 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-767b8bc766-rs272_91dbf1cd-c113-4dfb-bb3f-28d35d7994b2/manager/0.log" Jan 28 16:07:27 crc kubenswrapper[4959]: I0128 16:07:27.597736 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:07:27 crc kubenswrapper[4959]: E0128 16:07:27.598040 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:07:30 crc kubenswrapper[4959]: I0128 16:07:30.086214 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:07:30 crc kubenswrapper[4959]: I0128 16:07:30.134801 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:07:30 crc kubenswrapper[4959]: I0128 16:07:30.945631 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvkpj"] Jan 28 16:07:31 crc kubenswrapper[4959]: I0128 16:07:31.592085 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tvkpj" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" containerID="cri-o://eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d" gracePeriod=2 Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.064031 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.182200 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-catalog-content\") pod \"aa87f946-ebff-4bde-b6e1-cb66757459c2\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.182275 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zctnj\" (UniqueName: \"kubernetes.io/projected/aa87f946-ebff-4bde-b6e1-cb66757459c2-kube-api-access-zctnj\") pod \"aa87f946-ebff-4bde-b6e1-cb66757459c2\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.182399 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-utilities\") pod \"aa87f946-ebff-4bde-b6e1-cb66757459c2\" (UID: \"aa87f946-ebff-4bde-b6e1-cb66757459c2\") " Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.183352 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-utilities" (OuterVolumeSpecName: "utilities") pod "aa87f946-ebff-4bde-b6e1-cb66757459c2" (UID: "aa87f946-ebff-4bde-b6e1-cb66757459c2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.189756 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa87f946-ebff-4bde-b6e1-cb66757459c2-kube-api-access-zctnj" (OuterVolumeSpecName: "kube-api-access-zctnj") pod "aa87f946-ebff-4bde-b6e1-cb66757459c2" (UID: "aa87f946-ebff-4bde-b6e1-cb66757459c2"). InnerVolumeSpecName "kube-api-access-zctnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.284759 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zctnj\" (UniqueName: \"kubernetes.io/projected/aa87f946-ebff-4bde-b6e1-cb66757459c2-kube-api-access-zctnj\") on node \"crc\" DevicePath \"\"" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.285085 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.341393 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aa87f946-ebff-4bde-b6e1-cb66757459c2" (UID: "aa87f946-ebff-4bde-b6e1-cb66757459c2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.386441 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa87f946-ebff-4bde-b6e1-cb66757459c2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.608730 4959 generic.go:334] "Generic (PLEG): container finished" podID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerID="eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d" exitCode=0 Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.608786 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkpj" event={"ID":"aa87f946-ebff-4bde-b6e1-cb66757459c2","Type":"ContainerDied","Data":"eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d"} Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.608801 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvkpj" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.608821 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvkpj" event={"ID":"aa87f946-ebff-4bde-b6e1-cb66757459c2","Type":"ContainerDied","Data":"acf00ce2b552b5a4a95392fe8d9b3a7bedb9a5480d1fe05735cc89b5e370e22b"} Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.608841 4959 scope.go:117] "RemoveContainer" containerID="eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.640048 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvkpj"] Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.649585 4959 scope.go:117] "RemoveContainer" containerID="9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.652216 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tvkpj"] Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.675074 4959 scope.go:117] "RemoveContainer" containerID="e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.714678 4959 scope.go:117] "RemoveContainer" containerID="eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d" Jan 28 16:07:32 crc kubenswrapper[4959]: E0128 16:07:32.715262 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d\": container with ID starting with eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d not found: ID does not exist" containerID="eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.715298 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d"} err="failed to get container status \"eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d\": rpc error: code = NotFound desc = could not find container \"eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d\": container with ID starting with eb4d348343ffe560d25c961a7e49a92ff97c8794c7b6671472be2085db6fc35d not found: ID does not exist" Jan 28 16:07:32 crc 
kubenswrapper[4959]: I0128 16:07:32.715321 4959 scope.go:117] "RemoveContainer" containerID="9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748" Jan 28 16:07:32 crc kubenswrapper[4959]: E0128 16:07:32.715794 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748\": container with ID starting with 9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748 not found: ID does not exist" containerID="9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.715830 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748"} err="failed to get container status \"9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748\": rpc error: code = NotFound desc = could not find container \"9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748\": container with ID starting with 9428ea2ffbd31288285fa812b82d34b981c77c24bf94a9806b22e1e2cac4e748 not found: ID does not exist" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.715850 4959 scope.go:117] "RemoveContainer" containerID="e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514" Jan 28 16:07:32 crc kubenswrapper[4959]: E0128 16:07:32.716323 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514\": container with ID starting with e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514 not found: ID does not exist" containerID="e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514" Jan 28 16:07:32 crc kubenswrapper[4959]: I0128 16:07:32.716365 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514"} err="failed to get container status \"e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514\": rpc error: code = NotFound desc = could not find container \"e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514\": container with ID starting with e193b3431db3c3c9b832f483d8f4a1773cd39f797d30f3fd1b5f7835aae1b514 not found: ID does not exist" Jan 28 16:07:34 crc kubenswrapper[4959]: I0128 16:07:34.597936 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" path="/var/lib/kubelet/pods/aa87f946-ebff-4bde-b6e1-cb66757459c2/volumes" Jan 28 16:07:42 crc kubenswrapper[4959]: I0128 16:07:42.587824 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:07:42 crc kubenswrapper[4959]: E0128 16:07:42.589524 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:07:45 crc kubenswrapper[4959]: I0128 16:07:45.835070 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-884qg_d2d96b34-2e44-4d18-a591-2c286c762bf9/control-plane-machine-set-operator/0.log" Jan 28 16:07:45 crc kubenswrapper[4959]: I0128 16:07:45.977787 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-fphck_ce02fde1-7de1-4456-9fa4-6591a4b18b9c/kube-rbac-proxy/0.log" Jan 28 16:07:46 crc kubenswrapper[4959]: I0128 16:07:46.003706 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-fphck_ce02fde1-7de1-4456-9fa4-6591a4b18b9c/machine-api-operator/0.log" Jan 28 16:07:53 crc kubenswrapper[4959]: I0128 16:07:53.587425 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:07:53 crc kubenswrapper[4959]: E0128 16:07:53.588574 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:07:58 crc kubenswrapper[4959]: I0128 16:07:58.433446 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-mhfkm_f8b4d3bd-6f12-4998-9613-1e9e1e092cbe/cert-manager-controller/0.log" Jan 28 16:07:58 crc kubenswrapper[4959]: I0128 16:07:58.605501 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-5xkqq_27a0974f-8766-47d5-aea4-65d8abada350/cert-manager-cainjector/0.log" Jan 28 16:07:58 crc kubenswrapper[4959]: I0128 16:07:58.691691 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-4m52p_11a69736-0d82-4d12-8618-7dacd4800aac/cert-manager-webhook/0.log" Jan 28 16:08:06 crc kubenswrapper[4959]: I0128 16:08:06.587707 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:08:06 crc kubenswrapper[4959]: E0128 16:08:06.588566 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:08:10 crc kubenswrapper[4959]: I0128 16:08:10.329414 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-nqbr4_9230b3bb-f87d-4da9-83f5-624c4cc250eb/nmstate-console-plugin/0.log" Jan 28 16:08:10 crc kubenswrapper[4959]: I0128 16:08:10.538846 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-d7gzv_f9551657-7ead-43f2-ab0d-0a00ca38d632/nmstate-handler/0.log" Jan 28 16:08:10 crc kubenswrapper[4959]: I0128 16:08:10.559926 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-ffgf2_7c5e3f8a-ef22-47d0-99e2-3dc85615a832/kube-rbac-proxy/0.log" Jan 28 16:08:10 crc kubenswrapper[4959]: I0128 16:08:10.658887 4959 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-ffgf2_7c5e3f8a-ef22-47d0-99e2-3dc85615a832/nmstate-metrics/0.log" Jan 28 16:08:10 crc kubenswrapper[4959]: I0128 16:08:10.753787 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-xvpdr_a0e99b55-ca45-4cbf-9141-7ee49d98b970/nmstate-operator/0.log" Jan 28 16:08:10 crc kubenswrapper[4959]: I0128 16:08:10.837881 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-jg2rh_4af3461f-6819-4c66-a2e5-3bdcb0d20557/nmstate-webhook/0.log" Jan 28 16:08:18 crc kubenswrapper[4959]: I0128 16:08:18.587299 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:08:18 crc kubenswrapper[4959]: E0128 16:08:18.588277 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:08:32 crc kubenswrapper[4959]: I0128 16:08:32.589672 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:08:32 crc kubenswrapper[4959]: E0128 16:08:32.590776 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.114863 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-2fnlx_fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb/kube-rbac-proxy/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.307101 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-2fnlx_fb36ee39-935f-4b4a-9f68-d0d10dc6eaeb/controller/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.387736 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-frr-files/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.569750 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-reloader/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.590726 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-metrics/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.607328 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-frr-files/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.638253 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-reloader/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.797304 4959 
log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-reloader/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.800388 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-metrics/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.808022 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-frr-files/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.829193 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-metrics/0.log" Jan 28 16:08:37 crc kubenswrapper[4959]: I0128 16:08:37.978533 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-reloader/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.000177 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-metrics/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.022536 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/cp-frr-files/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.037555 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/controller/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.203451 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/frr-metrics/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.223284 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/kube-rbac-proxy/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.229273 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/kube-rbac-proxy-frr/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.396634 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/reloader/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.500982 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-rdt2h_49fb622a-9b72-4a64-9d53-31b4871d1fe2/frr-k8s-webhook-server/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.706559 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-bzd7h_0e4cb52a-0e2e-4c10-bd94-ef235dadad01/frr/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.738837 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6bdc8c55c-q6xtb_22993a88-bd9f-44f6-838c-da64f90a0cf3/manager/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.861187 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-75d68c5fb8-wdpd5_c0f19574-cbac-4f58-b1ce-ecd885de0b11/webhook-server/0.log" Jan 28 16:08:38 crc kubenswrapper[4959]: I0128 16:08:38.934628 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_speaker-84w8q_1924ef1e-9d1d-407b-a368-885c1f5eb373/kube-rbac-proxy/0.log" Jan 28 16:08:39 crc kubenswrapper[4959]: I0128 16:08:39.223921 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-84w8q_1924ef1e-9d1d-407b-a368-885c1f5eb373/speaker/0.log" Jan 28 16:08:46 crc kubenswrapper[4959]: I0128 16:08:46.587662 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:08:46 crc kubenswrapper[4959]: E0128 16:08:46.588639 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.181129 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z_2973aad5-f211-4e21-a51c-c1d0c79d1e99/util/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.371524 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z_2973aad5-f211-4e21-a51c-c1d0c79d1e99/util/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.395407 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z_2973aad5-f211-4e21-a51c-c1d0c79d1e99/pull/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.419130 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z_2973aad5-f211-4e21-a51c-c1d0c79d1e99/pull/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.590086 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z_2973aad5-f211-4e21-a51c-c1d0c79d1e99/extract/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.624526 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z_2973aad5-f211-4e21-a51c-c1d0c79d1e99/util/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.724404 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcl2b7z_2973aad5-f211-4e21-a51c-c1d0c79d1e99/pull/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.811567 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw_7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a/util/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.975126 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw_7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a/util/0.log" Jan 28 16:08:51 crc kubenswrapper[4959]: I0128 16:08:51.983520 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw_7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a/pull/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.010783 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw_7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a/pull/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.184712 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw_7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a/util/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.187046 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw_7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a/pull/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.202978 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713sfbbw_7c2f29e7-8cc2-47d3-a7f2-b30a5db2f61a/extract/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.398249 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lzhkf_5185c08f-f7c9-4db8-a958-8a57a202824c/extract-utilities/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.569979 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lzhkf_5185c08f-f7c9-4db8-a958-8a57a202824c/extract-content/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.577200 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lzhkf_5185c08f-f7c9-4db8-a958-8a57a202824c/extract-utilities/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.592661 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lzhkf_5185c08f-f7c9-4db8-a958-8a57a202824c/extract-content/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.769375 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lzhkf_5185c08f-f7c9-4db8-a958-8a57a202824c/extract-utilities/0.log" Jan 28 16:08:52 crc kubenswrapper[4959]: I0128 16:08:52.831706 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lzhkf_5185c08f-f7c9-4db8-a958-8a57a202824c/extract-content/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.001391 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7nlb_d3fe0784-f751-4e36-a8ce-804e95f72f12/extract-utilities/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.196297 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7nlb_d3fe0784-f751-4e36-a8ce-804e95f72f12/extract-content/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.243986 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7nlb_d3fe0784-f751-4e36-a8ce-804e95f72f12/extract-utilities/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.291664 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-lzhkf_5185c08f-f7c9-4db8-a958-8a57a202824c/registry-server/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.297425 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7nlb_d3fe0784-f751-4e36-a8ce-804e95f72f12/extract-content/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.519294 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7nlb_d3fe0784-f751-4e36-a8ce-804e95f72f12/extract-utilities/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.522528 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7nlb_d3fe0784-f751-4e36-a8ce-804e95f72f12/extract-content/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.740888 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mf7qm_b9302505-07e8-4fd5-b50f-fba85a6feb28/extract-utilities/0.log" Jan 28 16:08:53 crc kubenswrapper[4959]: I0128 16:08:53.797882 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-484gz_ccd0a857-5b20-4589-8d52-b7339fa7524f/marketplace-operator/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.013430 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7nlb_d3fe0784-f751-4e36-a8ce-804e95f72f12/registry-server/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.065653 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mf7qm_b9302505-07e8-4fd5-b50f-fba85a6feb28/extract-content/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.074613 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mf7qm_b9302505-07e8-4fd5-b50f-fba85a6feb28/extract-content/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.088510 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mf7qm_b9302505-07e8-4fd5-b50f-fba85a6feb28/extract-utilities/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.259198 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mf7qm_b9302505-07e8-4fd5-b50f-fba85a6feb28/extract-content/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.291204 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mf7qm_b9302505-07e8-4fd5-b50f-fba85a6feb28/extract-utilities/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.420862 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mf7qm_b9302505-07e8-4fd5-b50f-fba85a6feb28/registry-server/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.490038 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5mbdw_7c497c7c-b2c4-4162-bd50-410dfbc61e46/extract-utilities/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.660062 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5mbdw_7c497c7c-b2c4-4162-bd50-410dfbc61e46/extract-utilities/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.691648 4959 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-5mbdw_7c497c7c-b2c4-4162-bd50-410dfbc61e46/extract-content/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.697547 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5mbdw_7c497c7c-b2c4-4162-bd50-410dfbc61e46/extract-content/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.860275 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5mbdw_7c497c7c-b2c4-4162-bd50-410dfbc61e46/extract-content/0.log" Jan 28 16:08:54 crc kubenswrapper[4959]: I0128 16:08:54.870979 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5mbdw_7c497c7c-b2c4-4162-bd50-410dfbc61e46/extract-utilities/0.log" Jan 28 16:08:55 crc kubenswrapper[4959]: I0128 16:08:55.070902 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-5mbdw_7c497c7c-b2c4-4162-bd50-410dfbc61e46/registry-server/0.log" Jan 28 16:09:01 crc kubenswrapper[4959]: I0128 16:09:01.586968 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:09:01 crc kubenswrapper[4959]: E0128 16:09:01.587711 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:09:14 crc kubenswrapper[4959]: I0128 16:09:14.587812 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:09:14 crc kubenswrapper[4959]: E0128 16:09:14.588565 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:09:29 crc kubenswrapper[4959]: I0128 16:09:29.587430 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:09:29 crc kubenswrapper[4959]: E0128 16:09:29.588318 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:09:40 crc kubenswrapper[4959]: I0128 16:09:40.594435 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:09:40 crc kubenswrapper[4959]: E0128 16:09:40.595203 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:09:52 crc kubenswrapper[4959]: I0128 16:09:52.591263 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:09:52 crc kubenswrapper[4959]: E0128 16:09:52.592052 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:10:03 crc kubenswrapper[4959]: I0128 16:10:03.587673 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:10:03 crc kubenswrapper[4959]: E0128 16:10:03.588523 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:10:17 crc kubenswrapper[4959]: I0128 16:10:17.590431 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:10:17 crc kubenswrapper[4959]: E0128 16:10:17.594039 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:10:22 crc kubenswrapper[4959]: I0128 16:10:22.971493 4959 generic.go:334] "Generic (PLEG): container finished" podID="992138c6-26a8-4e87-91e9-813c936ef34d" containerID="e86d8212efa6d6b0c71daa2c3dfb246903eef20dc7ac05364096bcf93d98760d" exitCode=0 Jan 28 16:10:22 crc kubenswrapper[4959]: I0128 16:10:22.971593 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-v99kd/must-gather-m8bqf" event={"ID":"992138c6-26a8-4e87-91e9-813c936ef34d","Type":"ContainerDied","Data":"e86d8212efa6d6b0c71daa2c3dfb246903eef20dc7ac05364096bcf93d98760d"} Jan 28 16:10:22 crc kubenswrapper[4959]: I0128 16:10:22.972629 4959 scope.go:117] "RemoveContainer" containerID="e86d8212efa6d6b0c71daa2c3dfb246903eef20dc7ac05364096bcf93d98760d" Jan 28 16:10:23 crc kubenswrapper[4959]: I0128 16:10:23.741428 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-v99kd_must-gather-m8bqf_992138c6-26a8-4e87-91e9-813c936ef34d/gather/0.log" Jan 28 16:10:28 crc kubenswrapper[4959]: I0128 16:10:28.586603 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:10:28 crc kubenswrapper[4959]: E0128 16:10:28.587469 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:10:31 crc kubenswrapper[4959]: I0128 16:10:31.166419 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-v99kd/must-gather-m8bqf"] Jan 28 16:10:31 crc kubenswrapper[4959]: I0128 16:10:31.166864 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-v99kd/must-gather-m8bqf" podUID="992138c6-26a8-4e87-91e9-813c936ef34d" containerName="copy" containerID="cri-o://9a65cbf4c681e1eb6ba69fe9de07c585bd28c840cb65463a3eed46a96f2c5c3d" gracePeriod=2 Jan 28 16:10:31 crc kubenswrapper[4959]: I0128 16:10:31.173600 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-v99kd/must-gather-m8bqf"] Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.036500 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-v99kd_must-gather-m8bqf_992138c6-26a8-4e87-91e9-813c936ef34d/copy/0.log" Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.037284 4959 generic.go:334] "Generic (PLEG): container finished" podID="992138c6-26a8-4e87-91e9-813c936ef34d" containerID="9a65cbf4c681e1eb6ba69fe9de07c585bd28c840cb65463a3eed46a96f2c5c3d" exitCode=143 Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.037346 4959 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ace7701864f897ab926186569c178b9472475ddd0b37f449f0535584521ee0b1" Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.051126 4959 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-v99kd_must-gather-m8bqf_992138c6-26a8-4e87-91e9-813c936ef34d/copy/0.log" Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.051504 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.203748 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/992138c6-26a8-4e87-91e9-813c936ef34d-must-gather-output\") pod \"992138c6-26a8-4e87-91e9-813c936ef34d\" (UID: \"992138c6-26a8-4e87-91e9-813c936ef34d\") " Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.204438 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llm7v\" (UniqueName: \"kubernetes.io/projected/992138c6-26a8-4e87-91e9-813c936ef34d-kube-api-access-llm7v\") pod \"992138c6-26a8-4e87-91e9-813c936ef34d\" (UID: \"992138c6-26a8-4e87-91e9-813c936ef34d\") " Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.241798 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/992138c6-26a8-4e87-91e9-813c936ef34d-kube-api-access-llm7v" (OuterVolumeSpecName: "kube-api-access-llm7v") pod "992138c6-26a8-4e87-91e9-813c936ef34d" (UID: "992138c6-26a8-4e87-91e9-813c936ef34d"). InnerVolumeSpecName "kube-api-access-llm7v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.306210 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llm7v\" (UniqueName: \"kubernetes.io/projected/992138c6-26a8-4e87-91e9-813c936ef34d-kube-api-access-llm7v\") on node \"crc\" DevicePath \"\"" Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.384272 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/992138c6-26a8-4e87-91e9-813c936ef34d-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "992138c6-26a8-4e87-91e9-813c936ef34d" (UID: "992138c6-26a8-4e87-91e9-813c936ef34d"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.409230 4959 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/992138c6-26a8-4e87-91e9-813c936ef34d-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 28 16:10:32 crc kubenswrapper[4959]: I0128 16:10:32.596920 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="992138c6-26a8-4e87-91e9-813c936ef34d" path="/var/lib/kubelet/pods/992138c6-26a8-4e87-91e9-813c936ef34d/volumes" Jan 28 16:10:33 crc kubenswrapper[4959]: I0128 16:10:33.043605 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-v99kd/must-gather-m8bqf" Jan 28 16:10:41 crc kubenswrapper[4959]: I0128 16:10:41.587173 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:10:41 crc kubenswrapper[4959]: E0128 16:10:41.588272 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:10:55 crc kubenswrapper[4959]: I0128 16:10:55.588276 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:10:55 crc kubenswrapper[4959]: E0128 16:10:55.589460 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:11:09 crc kubenswrapper[4959]: I0128 16:11:09.586864 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:11:09 crc kubenswrapper[4959]: E0128 16:11:09.588655 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:11:24 crc 
kubenswrapper[4959]: I0128 16:11:24.588343 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:11:24 crc kubenswrapper[4959]: E0128 16:11:24.589169 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:11:36 crc kubenswrapper[4959]: I0128 16:11:36.587927 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:11:36 crc kubenswrapper[4959]: E0128 16:11:36.588883 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:11:47 crc kubenswrapper[4959]: I0128 16:11:47.588054 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:11:47 crc kubenswrapper[4959]: E0128 16:11:47.589173 4959 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-r75mw_openshift-machine-config-operator(f22b9702-cd33-405b-9cea-babf675908f5)\"" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" podUID="f22b9702-cd33-405b-9cea-babf675908f5" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.322797 4959 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jq4zh"] Jan 28 16:11:53 crc kubenswrapper[4959]: E0128 16:11:53.323801 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="extract-utilities" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.323820 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="extract-utilities" Jan 28 16:11:53 crc kubenswrapper[4959]: E0128 16:11:53.323836 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="extract-content" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.323845 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="extract-content" Jan 28 16:11:53 crc kubenswrapper[4959]: E0128 16:11:53.323862 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4773737a-28c8-460d-9f6d-58207682755c" containerName="container-00" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.323871 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="4773737a-28c8-460d-9f6d-58207682755c" containerName="container-00" Jan 28 16:11:53 crc kubenswrapper[4959]: E0128 16:11:53.323885 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" 
containerName="registry-server" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.323893 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" Jan 28 16:11:53 crc kubenswrapper[4959]: E0128 16:11:53.323909 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="992138c6-26a8-4e87-91e9-813c936ef34d" containerName="copy" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.323917 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="992138c6-26a8-4e87-91e9-813c936ef34d" containerName="copy" Jan 28 16:11:53 crc kubenswrapper[4959]: E0128 16:11:53.323936 4959 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="992138c6-26a8-4e87-91e9-813c936ef34d" containerName="gather" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.323945 4959 state_mem.go:107] "Deleted CPUSet assignment" podUID="992138c6-26a8-4e87-91e9-813c936ef34d" containerName="gather" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.324218 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="992138c6-26a8-4e87-91e9-813c936ef34d" containerName="gather" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.324245 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="992138c6-26a8-4e87-91e9-813c936ef34d" containerName="copy" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.324266 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa87f946-ebff-4bde-b6e1-cb66757459c2" containerName="registry-server" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.324281 4959 memory_manager.go:354] "RemoveStaleState removing state" podUID="4773737a-28c8-460d-9f6d-58207682755c" containerName="container-00" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.325864 4959 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.340820 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jq4zh"] Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.485058 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dscxv\" (UniqueName: \"kubernetes.io/projected/f232a8d6-2de4-450e-a4b4-628a02fae5c0-kube-api-access-dscxv\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.485225 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-utilities\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.485251 4959 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-catalog-content\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.590772 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-utilities\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.590814 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-catalog-content\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.590903 4959 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dscxv\" (UniqueName: \"kubernetes.io/projected/f232a8d6-2de4-450e-a4b4-628a02fae5c0-kube-api-access-dscxv\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.591282 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-utilities\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.591316 4959 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-catalog-content\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.607781 4959 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dscxv\" (UniqueName: \"kubernetes.io/projected/f232a8d6-2de4-450e-a4b4-628a02fae5c0-kube-api-access-dscxv\") pod \"community-operators-jq4zh\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:53 crc kubenswrapper[4959]: I0128 16:11:53.647174 4959 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:11:54 crc kubenswrapper[4959]: I0128 16:11:54.149704 4959 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jq4zh"] Jan 28 16:11:54 crc kubenswrapper[4959]: I0128 16:11:54.624523 4959 generic.go:334] "Generic (PLEG): container finished" podID="f232a8d6-2de4-450e-a4b4-628a02fae5c0" containerID="be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5" exitCode=0 Jan 28 16:11:54 crc kubenswrapper[4959]: I0128 16:11:54.624574 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jq4zh" event={"ID":"f232a8d6-2de4-450e-a4b4-628a02fae5c0","Type":"ContainerDied","Data":"be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5"} Jan 28 16:11:54 crc kubenswrapper[4959]: I0128 16:11:54.624604 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jq4zh" event={"ID":"f232a8d6-2de4-450e-a4b4-628a02fae5c0","Type":"ContainerStarted","Data":"fdb3161b112a467e0109ee2b6023a53237ef9e0e5cfb222cf2569cc409b253c1"} Jan 28 16:11:54 crc kubenswrapper[4959]: I0128 16:11:54.626813 4959 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 16:11:56 crc kubenswrapper[4959]: I0128 16:11:56.644848 4959 generic.go:334] "Generic (PLEG): container finished" podID="f232a8d6-2de4-450e-a4b4-628a02fae5c0" containerID="ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691" exitCode=0 Jan 28 16:11:56 crc kubenswrapper[4959]: I0128 16:11:56.644952 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jq4zh" event={"ID":"f232a8d6-2de4-450e-a4b4-628a02fae5c0","Type":"ContainerDied","Data":"ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691"} Jan 28 16:11:57 crc kubenswrapper[4959]: I0128 16:11:57.656613 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jq4zh" event={"ID":"f232a8d6-2de4-450e-a4b4-628a02fae5c0","Type":"ContainerStarted","Data":"10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca"} Jan 28 16:11:57 crc kubenswrapper[4959]: I0128 16:11:57.687935 4959 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jq4zh" podStartSLOduration=2.262758552 podStartE2EDuration="4.687862637s" podCreationTimestamp="2026-01-28 16:11:53 +0000 UTC" firstStartedPulling="2026-01-28 16:11:54.626515138 +0000 UTC m=+3298.072421521" lastFinishedPulling="2026-01-28 16:11:57.051619223 +0000 UTC m=+3300.497525606" observedRunningTime="2026-01-28 16:11:57.674693625 +0000 UTC m=+3301.120600038" watchObservedRunningTime="2026-01-28 16:11:57.687862637 +0000 UTC m=+3301.133769030" Jan 28 16:12:00 crc kubenswrapper[4959]: I0128 16:12:00.592665 4959 scope.go:117] "RemoveContainer" containerID="eb7a7af73f790b1ebfa66c29b11ada61edda0fee61b9211aec47c4db4f969e82" Jan 28 16:12:01 crc kubenswrapper[4959]: I0128 16:12:01.693645 4959 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-machine-config-operator/machine-config-daemon-r75mw" event={"ID":"f22b9702-cd33-405b-9cea-babf675908f5","Type":"ContainerStarted","Data":"bbed6b74710d356fffc3c1d164782be5da5b781653eaab6bbfec16a282192f45"} Jan 28 16:12:03 crc kubenswrapper[4959]: I0128 16:12:03.647340 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:12:03 crc kubenswrapper[4959]: I0128 16:12:03.647701 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:12:03 crc kubenswrapper[4959]: I0128 16:12:03.700465 4959 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:12:03 crc kubenswrapper[4959]: I0128 16:12:03.754373 4959 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:12:03 crc kubenswrapper[4959]: I0128 16:12:03.936222 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jq4zh"] Jan 28 16:12:05 crc kubenswrapper[4959]: I0128 16:12:05.721120 4959 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jq4zh" podUID="f232a8d6-2de4-450e-a4b4-628a02fae5c0" containerName="registry-server" containerID="cri-o://10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca" gracePeriod=2 Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.138100 4959 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.202157 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-catalog-content\") pod \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.202203 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dscxv\" (UniqueName: \"kubernetes.io/projected/f232a8d6-2de4-450e-a4b4-628a02fae5c0-kube-api-access-dscxv\") pod \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.202232 4959 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-utilities\") pod \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\" (UID: \"f232a8d6-2de4-450e-a4b4-628a02fae5c0\") " Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.203650 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-utilities" (OuterVolumeSpecName: "utilities") pod "f232a8d6-2de4-450e-a4b4-628a02fae5c0" (UID: "f232a8d6-2de4-450e-a4b4-628a02fae5c0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.207869 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f232a8d6-2de4-450e-a4b4-628a02fae5c0-kube-api-access-dscxv" (OuterVolumeSpecName: "kube-api-access-dscxv") pod "f232a8d6-2de4-450e-a4b4-628a02fae5c0" (UID: "f232a8d6-2de4-450e-a4b4-628a02fae5c0"). InnerVolumeSpecName "kube-api-access-dscxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.267688 4959 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f232a8d6-2de4-450e-a4b4-628a02fae5c0" (UID: "f232a8d6-2de4-450e-a4b4-628a02fae5c0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.303867 4959 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.303903 4959 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dscxv\" (UniqueName: \"kubernetes.io/projected/f232a8d6-2de4-450e-a4b4-628a02fae5c0-kube-api-access-dscxv\") on node \"crc\" DevicePath \"\"" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.303915 4959 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f232a8d6-2de4-450e-a4b4-628a02fae5c0-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.729481 4959 generic.go:334] "Generic (PLEG): container finished" podID="f232a8d6-2de4-450e-a4b4-628a02fae5c0" containerID="10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca" exitCode=0 Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.729525 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jq4zh" event={"ID":"f232a8d6-2de4-450e-a4b4-628a02fae5c0","Type":"ContainerDied","Data":"10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca"} Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.729553 4959 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jq4zh" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.729571 4959 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jq4zh" event={"ID":"f232a8d6-2de4-450e-a4b4-628a02fae5c0","Type":"ContainerDied","Data":"fdb3161b112a467e0109ee2b6023a53237ef9e0e5cfb222cf2569cc409b253c1"} Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.729588 4959 scope.go:117] "RemoveContainer" containerID="10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.751633 4959 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jq4zh"] Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.753766 4959 scope.go:117] "RemoveContainer" containerID="ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.775788 4959 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jq4zh"] Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.789342 4959 scope.go:117] "RemoveContainer" containerID="be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.823263 4959 scope.go:117] "RemoveContainer" containerID="10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca" Jan 28 16:12:06 crc kubenswrapper[4959]: E0128 16:12:06.823819 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca\": container with ID starting with 10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca not found: ID does not exist" containerID="10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.823901 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca"} err="failed to get container status \"10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca\": rpc error: code = NotFound desc = could not find container \"10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca\": container with ID starting with 10e9528c05b15b3e70a44dcd626707a7f9b97d2964065149109336f2693770ca not found: ID does not exist" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.823966 4959 scope.go:117] "RemoveContainer" containerID="ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691" Jan 28 16:12:06 crc kubenswrapper[4959]: E0128 16:12:06.824598 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691\": container with ID starting with ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691 not found: ID does not exist" containerID="ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.824632 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691"} err="failed to get container status \"ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691\": rpc error: code = NotFound desc = could not find 
container \"ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691\": container with ID starting with ebfed7b084762f08e05678b0d0ec8157b08ce286f9394e18e053ee299dfcc691 not found: ID does not exist" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.824647 4959 scope.go:117] "RemoveContainer" containerID="be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5" Jan 28 16:12:06 crc kubenswrapper[4959]: E0128 16:12:06.824922 4959 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5\": container with ID starting with be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5 not found: ID does not exist" containerID="be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5" Jan 28 16:12:06 crc kubenswrapper[4959]: I0128 16:12:06.824960 4959 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5"} err="failed to get container status \"be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5\": rpc error: code = NotFound desc = could not find container \"be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5\": container with ID starting with be44cb5beef184a51af819e7904e70a2a968d8eafe92e4a649880333350b5ee5 not found: ID does not exist" Jan 28 16:12:08 crc kubenswrapper[4959]: I0128 16:12:08.600279 4959 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f232a8d6-2de4-450e-a4b4-628a02fae5c0" path="/var/lib/kubelet/pods/f232a8d6-2de4-450e-a4b4-628a02fae5c0/volumes" Jan 28 16:12:17 crc kubenswrapper[4959]: I0128 16:12:17.830988 4959 scope.go:117] "RemoveContainer" containerID="9a65cbf4c681e1eb6ba69fe9de07c585bd28c840cb65463a3eed46a96f2c5c3d" Jan 28 16:12:17 crc kubenswrapper[4959]: I0128 16:12:17.852360 4959 scope.go:117] "RemoveContainer" containerID="e86d8212efa6d6b0c71daa2c3dfb246903eef20dc7ac05364096bcf93d98760d" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515136432745024457 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015136432745017374 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015136423614016512 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015136423615015463 5ustar corecore